clfilter: ensure cache invalidation is done on the main unfiltered repo...
Pierre-Yves David
r17997:6089956e default
@@ -1,2646 +1,2648 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

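# Illustrative sketch (not part of this file): filecache memoizes a repo
# property and re-stats the backing file to decide when to invalidate;
# storecache only redirects the path lookup into the store directory. So a
# declaration such as
#
#     @storecache('phaseroots')
#     def _phasecache(self):
#         ...
#
# is recomputed only after .hg/store/phaseroots changes on disk.
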
def unfilteredmeth(orig):
    """decorate a method that always needs to run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

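# Illustrative sketch (not part of this file): unfilteredmeth is meant for
# methods whose caches must live on the single unfiltered repository, e.g.:
#
#     class localrepository(object):
#         @unfilteredmeth
#         def updatebranchcache(self):
#             ...   # always runs against the full changelog
#
# A filtered view then reaches the same cache through repo.unfiltered().
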
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are of the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

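    # Illustrative sketch (not part of this file; the exact calls are an
    # assumption): each _filecache entry pairs a property name with the stat
    # of its backing file, so invalidation is driven by on-disk changes:
    #
    #     entry = repo._filecache['_bookmarks']
    #     entry.refresh()                   # re-stat .hg/bookmarks
    #     del repo.__dict__['_bookmarks']   # force recomputation on access
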
    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository

        Intended to be overridden by filtered repos."""
        return self

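    # Illustrative sketch (not part of this file; the class name is
    # hypothetical): a filtered view would proxy the real repo and point
    # unfiltered() back at it, so that @unfilteredmeth methods and their
    # caches always land on one object:
    #
    #     class filteredrepo(object):
    #         def __init__(self, unfi):
    #             self._unfilteredrepo = unfi
    #         def unfiltered(self):
    #             return self._unfilteredrepo
    #         def __getattr__(self, name):
    #             return getattr(self._unfilteredrepo, name)
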
    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that the hiddenrevs will need invalidation when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly new extinct changeset)

        hidden changesets cannot have non-hidden descendants
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden

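    # Illustrative note (not part of this file): the query above keeps an
    # extinct changeset visible while it is an ancestor of the working parent
    # or of a bookmark. The command-line equivalent would be:
    #
    #     hg log -r 'extinct() - ::(. + bookmark())'   # what would be hidden
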
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

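    # Illustrative sketch (not part of this file; branchname is a
    # hypothetical variable): formatspec quotes the extra arguments, so
    # callers can interpolate user data into a revset safely:
    #
    #     for ctx in repo.set('branch(%s) and not obsolete()', branchname):
    #         print ctx.rev(), ctx.description()
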
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmeth
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    @unfilteredmeth # Until we get a smarter cache management
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded, so we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache
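
    # Illustrative note (not part of this file; values are hypothetical):
    # branchmap() maps branch names to lists of branch-head nodes, e.g.
    #
    #     {'default': [node1, node2], 'stable': [node3]}
    #
    # The @unfilteredmeth decorations above keep the cache and its
    # invalidation on the main unfiltered repo, while filtered views fall
    # back to recomputing a fresh map when changelog.filteredrevs is set.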


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    @unfilteredmeth # Until we get a smarter cache management
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
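
    # Illustrative note (not part of this file; the hashes are hypothetical):
    # .hg/cache/branchheads is plain text, "<tip-hex> <tip-rev>" on the first
    # line followed by one "<node-hex> <branchname>" entry per head:
    #
    #     1f0dee641bb7258c56bd60e93edfa2405381c41e 42
    #     1f0dee641bb7258c56bd60e93edfa2405381c41e default
    #     a93fc29b3f9af5f9bb4c2e5e60e1fcbbf2f201d6 stable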

    @unfilteredmeth # Until we get a smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    @unfilteredmeth # Until we get a smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
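
    # Illustrative note (not part of this file; the variable names are
    # hypothetical): a node counts as unknown both when it is absent and when
    # its phase is secret, so secret changesets are never advertised to peers:
    #
    #     repo.known([public_node, secret_node, missing_node])
    #     # -> [True, False, False]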

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
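
    # Illustrative note (not part of this file): the pattern/command pairs
    # come from the [encode] and [decode] hgrc sections. The filter names
    # below are assumptions (a shell command such as "tr -d '\r'" could stand
    # in their place):
    #
    #     [encode]
    #     **.txt = dumbencode:
    #     [decode]
    #     **.txt = dumbdecode: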
872
872
873 def _filter(self, filterpats, filename, data):
873 def _filter(self, filterpats, filename, data):
874 for mf, fn, cmd in filterpats:
874 for mf, fn, cmd in filterpats:
875 if mf(filename):
875 if mf(filename):
876 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
876 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
877 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
877 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
878 break
878 break
879
879
880 return data
880 return data
881
881
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

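    # A hedged sketch of how the encode/decode machinery above is configured;
    # the hgrc section names match the filter names passed to _loadfilter(),
    # while the patterns and commands below are purely illustrative:
    #
    #   [encode]
    #   **.dat = tempfile: somefilter INFILE OUTFILE
    #
    #   [decode]
    #   **.dat = tempfile: someunfilter INFILE OUTFILE
    #
    # _loadfilter() compiles each pair into (matcher, filterfn, params);
    # wread() runs data through the first matching 'encode' entry and
    # wwrite()/wwritedata() through the first matching 'decode' entry.
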
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

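    # A minimal usage sketch (caller-side, assuming 'repo' is a repository
    # object); this mirrors the pairing commitctx() uses further down:
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('example')
    #       try:
    #           ...               # write store data through tr
    #           tr.close()        # success: journal becomes the undo files
    #       finally:
    #           tr.release()      # aborts the transaction unless close() ran
    #   finally:
    #       lock.release()
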
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

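    # Lifecycle note: transaction() registers aftertrans(renames) above, so a
    # successful tr.close() renames each journal file to its undoname()
    # counterpart (journal.dirstate -> undo.dirstate, and so on). rollback()
    # and _rollback() below consume those undo files, while recover() replays
    # a journal left behind by an interrupted transaction.
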
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroyed(), which will prevent the branchhead cache from
        # being invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

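    # Why invalidation targets self.unfiltered() above: filecached properties
    # live in the unfiltered repo's __dict__, and filtered views only proxy
    # reads. A self-contained sketch of the failure mode this avoids (the
    # class here is illustrative, not mercurial's actual view type):
    #
    #   class view(object):
    #       def __init__(self, repo):
    #           self._repo = repo
    #       def __getattr__(self, name):   # reads fall through to the repo
    #           return getattr(self._repo, name)
    #
    #   v = view(repo)
    #   v.changelog                    # served from repo.__dict__
    #   delattr(v, 'changelog')        # AttributeError: nothing cached on v
    #   delattr(repo, 'changelog')     # this is what actually drops the cache
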
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

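    # Lock-ordering sketch, matching how commit() and rollback() below use
    # these methods: take wlock before lock and release in reverse order, so
    # working-directory writers and store writers cannot deadlock:
    #
    #   wlock = repo.wlock()
    #   try:
    #       lock = repo.lock()
    #       try:
    #           ...                    # mutate dirstate and store together
    #       finally:
    #           lock.release()
    #   finally:
    #       wlock.release()
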
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

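    # commitctx() below drives this helper: each modified or added file's
    # return value is recorded in the new manifest (new[f] = ...), so the
    # final 'return fparent1' path is how an unchanged file reuses its
    # existing filelog revision instead of writing a new one.
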
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

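    # For reference in the method above (and in commitctx below), the status
    # tuple indexed as changes[0..6] holds, in order:
    #
    #   changes[0] modified    changes[4] unknown
    #   changes[1] added       changes[5] ignored
    #   changes[2] removed     changes[6] clean
    #   changes[3] deleted
    #
    # matching the seven lists returned by self.status() further down.
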
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmeth
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

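    # A hedged caller-side sketch (repository construction is illustrative):
    #
    #   from mercurial import ui, hg
    #   repo = hg.repository(ui.ui(), '/path/to/repo')
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, ignored=True, clean=True)
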
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

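    # between() samples each top->bottom chain at exponentially growing
    # distances (i == 1, 2, 4, 8, ...), which suits a binary search over
    # ancestry. For a linear chain r10 -> r9 -> ... -> r0 with top=r10 and
    # bottom=r0 it yields [r9, r8, r6, r2] (illustrative; the actual nodes
    # depend on the history walked).
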
    def pull(self, remote, heads=None, force=False):
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

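    # push() mirrors pull(): discovery, changegroup transfer, then phase,
    # obsolescence-marker and bookmark synchronization. A hedged usage
    # sketch (the peer construction is illustrative):
    #
    #   other = hg.peer(repo.ui, {}, 'ssh://example.com//repo')
    #   ret = repo.push(other, newbranch=True)
    #   if ret is None:
    #       repo.ui.status("nothing to push\n")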
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty --> no obsolete markers,
                        # so skip the whole iteration
                        if self.obsstore:
                            # these messages stay short for the 80-char limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are pushing and there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missing heads will be obsolete
                            # or unstable, so checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # the whole push failed, synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
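                    # Illustrative sketch (hypothetical nodes): with common
                    # head C and missing changesets C <- M1 <- M2 where the
                    # push of revs=[M2] failed, the list comprehension above
                    # keeps the requested revs the remote already has, and
                    # the revset adds C, the common parent of the missing
                    # roots, so phases are synchronized on C only.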
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

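    # An illustrative sketch (hypothetical nodes): with a linear history
    # A <- B <- C, changegroupsubset(bases=[A], heads=[C], ...) selects the
    # span of changesets that descend from a base and are ancestors of a
    # head, while the ancestors of the bases are assumed to be known to the
    # recipient already (the 'common' set computed below).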
    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

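    # As a sketch, getbundle('pull', heads=[H], common=[C]) bundles the
    # ancestors of H that are not ancestors of C. Nodes in common that are
    # unknown locally are silently dropped first, as the docstring below
    # explains.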
    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

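    # The generator built below emits a single changegroup stream in three
    # sections: changelog chunks, then manifest chunks, then one group per
    # changed file (a fileheader followed by its chunks), terminated by an
    # empty chunk from bundler.close(). The lookup() callback maps each
    # revlog entry back to the changeset that introduced it, so the
    # receiving side can reconstruct linkrevs.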
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

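    # The integer returned below encodes the head-count delta: going from 2
    # heads to 4 returns 3 (dh=2, plus one), an unchanged head count returns
    # 1, and going from 3 heads to 2 returns -2 (dh=-1, minus one). 0 is
    # reserved for "nothing changed or no source", so callers can treat any
    # nonzero value as "a changegroup was applied".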
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1 + added heads (2..n)
        - fewer heads than before: -1 - removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

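    # stream_in() consumes the 'stream_out' wire format, which the parsing
    # below implies: first a status line holding an integer response code,
    # then a line "<total_files> <total_bytes>", then for each file a header
    # line "<name>\0<size>" followed by exactly <size> bytes of raw store
    # data written straight into this repo's store.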
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

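    # A hedged usage sketch (peer construction is illustrative):
    #
    #   repo.clone(other)                    # stream if the server prefers
    #   repo.clone(other, stream=True)       # request a streaming clone
    #   repo.clone(other, heads=[h])         # partial clone, forces pull()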
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

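    # pushkey is the generic key/value exchange used elsewhere in this file
    # for the 'phases', 'bookmarks' and 'obsolete' namespaces. As a sketch,
    # marking a head public on a remote is
    #
    #   remote.pushkey('phases', ctx.hex(), str(phases.draft),
    #                  str(phases.public))
    #
    # which is exactly the call push() issues above.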
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

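# aftertrans() below returns a callback that renames the transaction journal
# files into their undo counterparts once the transaction closes, and
# undoname() is the matching name mapping: for example, a ('journal',
# 'undo') pair renames .hg/store/journal to .hg/store/undo, the file that a
# later rollback reads.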
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True