clfilter: ensure changegroup generation is run unfiltered...
Pierre-Yves David
r17999:85027e98 default
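
The fix relies on the unfilteredmeth decorator defined near the top of the file: a decorated method is re-dispatched to repo.unfiltered(), so changegroup generation always computes against the complete repository rather than against a filtered view (the two added lines sit beyond the excerpt shown below). A minimal standalone sketch of the mechanism, using hypothetical stand-in classes rather than the real repo types:

    def unfilteredmeth(orig):
        # same shape as the decorator defined in localrepo.py below
        def wrapper(repo, *args, **kwargs):
            return orig(repo.unfiltered(), *args, **kwargs)
        return wrapper

    class basefake(object):
        revs = [0, 1, 2]
        def unfiltered(self):
            return self              # the base repo is its own unfiltered view

        @unfilteredmeth
        def allrevs(self):
            return self.revs

    class filteredfake(basefake):
        revs = [0, 2]                # pretend revision 1 is filtered out
        def unfiltered(self):
            return basefake()        # a filtered view hands back the base repo

    print filteredfake().allrevs()   # [0, 1, 2]: the decorator bypasses the filter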
@@ -1,2649 +1,2651 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)
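
# Note on the cache decorators used throughout this file: a filecache
# property is invalidated when its backing file under .hg/ changes, while
# storecache (above) resolves the name through sjoin() and therefore watches
# files under .hg/store instead; e.g. @filecache('bookmarks') tracks
# .hg/bookmarks and @storecache('00changelog.i') tracks
# .hg/store/00changelog.i.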

def unfilteredmeth(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phases if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository

        Intended to be overwritten by the filtered repo."""
        return self
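
    # Illustration (hypothetical, not part of this file): a filtered view
    # would override unfiltered() to hand back the base repository, e.g.
    #
    #     class filteredview(localrepository):
    #         def unfiltered(self):
    #             return self._unfilteredrepo
    #
    # which is what lets unfilteredmeth-decorated methods escape the filter.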

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that hiddenrevs needs invalidation when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly a new extinct changeset)

        hidden changesets cannot have non-hidden descendants
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
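
    # Hypothetical usage of the two revset helpers above (not part of this
    # file):
    #
    #     openheads = repo.revs('head() and not closed()') # revision numbers
    #     for ctx in repo.set('%ld', openheads):           # changectx objects
    #         ui.write('%s\n' % ctx.branch())
    #
    # revs() returns plain revision numbers while set() wraps each one in a
    # changectx; '%ld' is the revset.formatspec placeholder for a list of
    # revisions.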

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmeth
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    @unfilteredmeth # Until we get smarter cache management
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded, so we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache
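
    # Note that branchmap() above recomputes from scratch whenever the
    # changelog is filtered (changelog.filteredrevs is non-empty): the
    # on-disk branchheads cache is only valid for the unfiltered repository,
    # which is why the cache-maintenance methods nearby are decorated with
    # @unfilteredmeth.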


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch; open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    @unfilteredmeth # Until we get smarter cache management
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    @unfilteredmeth # Until we get smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    @unfilteredmeth # Until we get smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
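
    # known() deliberately reports secret changesets (phase >= phases.secret)
    # as unknown, so discovery never advertises them to remote peers; a node
    # that exists locally but is secret yields False here.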
796
796
797 def local(self):
797 def local(self):
798 return self
798 return self
799
799
800 def cancopy(self):
800 def cancopy(self):
801 return self.local() # so statichttprepo's override of local() works
801 return self.local() # so statichttprepo's override of local() works
802
802
803 def join(self, f):
803 def join(self, f):
804 return os.path.join(self.path, f)
804 return os.path.join(self.path, f)
805
805
806 def wjoin(self, f):
806 def wjoin(self, f):
807 return os.path.join(self.root, f)
807 return os.path.join(self.root, f)
808
808
809 def file(self, f):
809 def file(self, f):
810 if f[0] == '/':
810 if f[0] == '/':
811 f = f[1:]
811 f = f[1:]
812 return filelog.filelog(self.sopener, f)
812 return filelog.filelog(self.sopener, f)
813
813
814 def changectx(self, changeid):
814 def changectx(self, changeid):
815 return self[changeid]
815 return self[changeid]
816
816
817 def parents(self, changeid=None):
817 def parents(self, changeid=None):
818 '''get list of changectxs for parents of changeid'''
818 '''get list of changectxs for parents of changeid'''
819 return self[changeid].parents()
819 return self[changeid].parents()
820
820
821 def setparents(self, p1, p2=nullid):
821 def setparents(self, p1, p2=nullid):
822 copies = self.dirstate.setparents(p1, p2)
822 copies = self.dirstate.setparents(p1, p2)
823 if copies:
823 if copies:
824 # Adjust copy records, the dirstate cannot do it, it
824 # Adjust copy records, the dirstate cannot do it, it
825 # requires access to parents manifests. Preserve them
825 # requires access to parents manifests. Preserve them
826 # only for entries added to first parent.
826 # only for entries added to first parent.
827 pctx = self[p1]
827 pctx = self[p1]
828 for f in copies:
828 for f in copies:
829 if f not in pctx and copies[f] in pctx:
829 if f not in pctx and copies[f] in pctx:
830 self.dirstate.copy(copies[f], f)
830 self.dirstate.copy(copies[f], f)
831
831
832 def filectx(self, path, changeid=None, fileid=None):
832 def filectx(self, path, changeid=None, fileid=None):
833 """changeid can be a changeset revision, node, or tag.
833 """changeid can be a changeset revision, node, or tag.
834 fileid can be a file revision or node."""
834 fileid can be a file revision or node."""
835 return context.filectx(self, path, changeid, fileid)
835 return context.filectx(self, path, changeid, fileid)
836
836
837 def getcwd(self):
837 def getcwd(self):
838 return self.dirstate.getcwd()
838 return self.dirstate.getcwd()
839
839
840 def pathto(self, f, cwd=None):
840 def pathto(self, f, cwd=None):
841 return self.dirstate.pathto(f, cwd)
841 return self.dirstate.pathto(f, cwd)
842
842
843 def wfile(self, f, mode='r'):
843 def wfile(self, f, mode='r'):
844 return self.wopener(f, mode)
844 return self.wopener(f, mode)
845
845
846 def _link(self, f):
846 def _link(self, f):
847 return os.path.islink(self.wjoin(f))
847 return os.path.islink(self.wjoin(f))
848
848
849 def _loadfilter(self, filter):
849 def _loadfilter(self, filter):
850 if filter not in self.filterpats:
850 if filter not in self.filterpats:
851 l = []
851 l = []
852 for pat, cmd in self.ui.configitems(filter):
852 for pat, cmd in self.ui.configitems(filter):
853 if cmd == '!':
853 if cmd == '!':
854 continue
854 continue
855 mf = matchmod.match(self.root, '', [pat])
855 mf = matchmod.match(self.root, '', [pat])
856 fn = None
856 fn = None
857 params = cmd
857 params = cmd
858 for name, filterfn in self._datafilters.iteritems():
858 for name, filterfn in self._datafilters.iteritems():
859 if cmd.startswith(name):
859 if cmd.startswith(name):
860 fn = filterfn
860 fn = filterfn
861 params = cmd[len(name):].lstrip()
861 params = cmd[len(name):].lstrip()
862 break
862 break
863 if not fn:
863 if not fn:
864 fn = lambda s, c, **kwargs: util.filter(s, c)
864 fn = lambda s, c, **kwargs: util.filter(s, c)
865 # Wrap old filters not supporting keyword arguments
865 # Wrap old filters not supporting keyword arguments
866 if not inspect.getargspec(fn)[2]:
866 if not inspect.getargspec(fn)[2]:
867 oldfn = fn
867 oldfn = fn
868 fn = lambda s, c, **kwargs: oldfn(s, c)
868 fn = lambda s, c, **kwargs: oldfn(s, c)
869 l.append((mf, fn, params))
869 l.append((mf, fn, params))
870 self.filterpats[filter] = l
870 self.filterpats[filter] = l
871 return self.filterpats[filter]
871 return self.filterpats[filter]
872
872
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

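    # Illustrative note (not part of the original file): in the manifest
    # flags used above, 'l' marks a symlink and 'x' an executable file, so
    # wwrite() either creates a symlink whose target is the filtered data
    # or writes a regular file and then sets its exec bit.
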
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

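    # Illustrative sketch (not part of the original file): callers are
    # expected to close the transaction on success and release it
    # unconditionally, as commitctx() below does::
    #
    #     tr = repo.transaction('some-description')
    #     try:
    #         ...  # write store data through the transaction
    #         tr.close()
    #     finally:
    #         tr.release()
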
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

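    # Illustrative note (not part of the original file): rollback() above
    # acquires the working-directory lock before the store lock; taking
    # the two locks in that order everywhere avoids lock-ordering
    # deadlocks between concurrent repository operations.
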
    @unfilteredmeth # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
        delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

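    # Illustrative note (not part of the original file): the blocking retry
    # above honours the ui.timeout setting, so a user can bound how long
    # lock acquisition may wait, e.g.::
    #
    #     [ui]
    #     timeout = 30
    #
    # After that many seconds the second lock.lock() call gives up and the
    # LockHeld error propagates to the caller.
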
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

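    # Illustrative note (not part of the original file): if no lock is
    # currently held, the callback runs immediately; commit() below uses
    # this to fire the 'commit' hook only once the store lock has been
    # released.
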
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return the lock; a
        weak reference to it is retained. Use this before modifying the
        store (e.g. committing or stripping). If you are opening a
        transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return the lock; a weak reference to it
        is retained. Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

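    # Illustrative sketch (not part of the original file): callers pair
    # each lock with a release in a finally block, as commit() below does::
    #
    #     wlock = repo.wlock()
    #     try:
    #         ...  # mutate working-directory state
    #     finally:
    #         wlock.release()
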
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

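    # Illustrative note (not part of the original file): when copy data is
    # recorded above, the filelog revision carries metadata roughly of the
    # form
    #
    #     meta = {'copy': 'foo', 'copyrev': '<40-hex filelog node>'}
    #
    # and its first parent is nullid, which is the cue to "look up the
    # copy data" described in the comment above.
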
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

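    # Illustrative sketch (not part of the original file): a minimal
    # caller, assuming a localrepo instance named repo::
    #
    #     node = repo.commit(text='fix a bug', user='alice <a@example.com>')
    #     if node is None:
    #         pass  # nothing changed, no commit was created
    #
    # The None return corresponds to the "nothing to commit" early exit
    # above.
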
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

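    # Illustrative note (not part of the original file): delayupdate()
    # above buffers the changelog write so that the pretxncommit hook can
    # see the pending revision (via the 'pending' callback) while the
    # transaction can still be rolled back if the hook fails.
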
    @unfilteredmeth
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerate it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

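    # Illustrative sketch (not part of the original file): callers unpack
    # the seven sorted lists returned above, e.g.::
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(unknown=True, clean=True)
    #
    # The ignored, clean and unknown lists stay empty unless the matching
    # keyword argument is set.
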
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

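    # Illustrative note (not part of the original file): because f doubles
    # each time it is reached, between() samples the first-parent walk from
    # top towards bottom at exponentially spaced steps (1, 2, 4, 8, ...);
    # this is the sampling the old-style wire-protocol discovery uses to
    # bisect the common prefix of two histories.
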
1754 def pull(self, remote, heads=None, force=False):
1754 def pull(self, remote, heads=None, force=False):
1755 # don't open transaction for nothing or you break future useful
1755 # don't open transaction for nothing or you break future useful
1756 # rollback call
1756 # rollback call
1757 tr = None
1757 tr = None
1758 trname = 'pull\n' + util.hidepassword(remote.url())
1758 trname = 'pull\n' + util.hidepassword(remote.url())
1759 lock = self.lock()
1759 lock = self.lock()
1760 try:
1760 try:
1761 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1761 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1762 force=force)
1762 force=force)
1763 common, fetch, rheads = tmp
1763 common, fetch, rheads = tmp
1764 if not fetch:
1764 if not fetch:
1765 self.ui.status(_("no changes found\n"))
1765 self.ui.status(_("no changes found\n"))
1766 added = []
1766 added = []
1767 result = 0
1767 result = 0
1768 else:
1768 else:
1769 tr = self.transaction(trname)
1769 tr = self.transaction(trname)
1770 if heads is None and list(common) == [nullid]:
1770 if heads is None and list(common) == [nullid]:
1771 self.ui.status(_("requesting all changes\n"))
1771 self.ui.status(_("requesting all changes\n"))
1772 elif heads is None and remote.capable('changegroupsubset'):
1772 elif heads is None and remote.capable('changegroupsubset'):
1773 # issue1320, avoid a race if remote changed after discovery
1773 # issue1320, avoid a race if remote changed after discovery
1774 heads = rheads
1774 heads = rheads
1775
1775
1776 if remote.capable('getbundle'):
1776 if remote.capable('getbundle'):
1777 cg = remote.getbundle('pull', common=common,
1777 cg = remote.getbundle('pull', common=common,
1778 heads=heads or rheads)
1778 heads=heads or rheads)
1779 elif heads is None:
1779 elif heads is None:
1780 cg = remote.changegroup(fetch, 'pull')
1780 cg = remote.changegroup(fetch, 'pull')
1781 elif not remote.capable('changegroupsubset'):
1781 elif not remote.capable('changegroupsubset'):
1782 raise util.Abort(_("partial pull cannot be done because "
1782 raise util.Abort(_("partial pull cannot be done because "
1783 "other repository doesn't support "
1783 "other repository doesn't support "
1784 "changegroupsubset."))
1784 "changegroupsubset."))
1785 else:
1785 else:
1786 cg = remote.changegroupsubset(fetch, heads, 'pull')
1786 cg = remote.changegroupsubset(fetch, heads, 'pull')
1787 clstart = len(self.changelog)
1787 clstart = len(self.changelog)
1788 result = self.addchangegroup(cg, 'pull', remote.url())
1788 result = self.addchangegroup(cg, 'pull', remote.url())
1789 clend = len(self.changelog)
1789 clend = len(self.changelog)
1790 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1790 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1791
1791
1792 # compute target subset
1792 # compute target subset
1793 if heads is None:
1793 if heads is None:
1794 # We pulled every thing possible
1794 # We pulled every thing possible
1795 # sync on everything common
1795 # sync on everything common
1796 subset = common + added
1796 subset = common + added
1797 else:
1797 else:
1798 # We pulled a specific subset
1798 # We pulled a specific subset
1799 # sync on this subset
1799 # sync on this subset
1800 subset = heads
1800 subset = heads
1801
1801
1802 # Get remote phases data from remote
1802 # Get remote phases data from remote
1803 remotephases = remote.listkeys('phases')
1803 remotephases = remote.listkeys('phases')
1804 publishing = bool(remotephases.get('publishing', False))
1804 publishing = bool(remotephases.get('publishing', False))
1805 if remotephases and not publishing:
1805 if remotephases and not publishing:
1806 # remote is new and unpublishing
1806 # remote is new and unpublishing
1807 pheads, _dr = phases.analyzeremotephases(self, subset,
1807 pheads, _dr = phases.analyzeremotephases(self, subset,
1808 remotephases)
1808 remotephases)
1809 phases.advanceboundary(self, phases.public, pheads)
1809 phases.advanceboundary(self, phases.public, pheads)
1810 phases.advanceboundary(self, phases.draft, subset)
1810 phases.advanceboundary(self, phases.draft, subset)
1811 else:
1811 else:
1812 # Remote is old or publishing all common changesets
1812 # Remote is old or publishing all common changesets
1813 # should be seen as public
1813 # should be seen as public
1814 phases.advanceboundary(self, phases.public, subset)
1814 phases.advanceboundary(self, phases.public, subset)
1815
1815
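            # obsolescence markers are exchanged through pushkey: the remote
            # exposes an 'obsolete' namespace whose keys (dump0, dump1, ...)
            # hold base85-encoded marker data that we decode and merge into
            # the local obsstore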
            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes the local user can lock the remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes the local user cannot lock the remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is false --> no obsolete markers,
                        # so we can skip the iteration
                        if self.obsstore:
                            # these messages are here for the 80-char limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are about to push any obsolete or
                            # unstable changeset in 'missing', at least one
                            # of the missing heads will be obsolete or
                            # unstable, so checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # push of everything failed, synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # We want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out).
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public-only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public
                    # here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
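                    # despite its name, 'remotedata' holds the *local*
                    # markers (self.listkeys) that we are about to push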
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
            if self.ui.debugflag:
                self.ui.debug("list of changesets:\n")
                for node in nodes:
                    self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

    @unfilteredmeth
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

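        # 'lookup' is the callback the bundler invokes for every node it
        # bundles: for changelog nodes it records which files and manifests
        # will be needed later, for manifest nodes it maps file nodes to the
        # changeset that introduced them; it also drives the progress bar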
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

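        # a bundle10 changegroup is emitted in a fixed order: changelog
        # chunks, then manifest chunks, then one group per modified file
        # (each prefixed by its filename), terminated by a close chunk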
        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmeth
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

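        # a manifest or file revision belongs in the bundle iff its linkrev
        # points at one of the changesets we are sending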
        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2293
2292 def addchangegroup(self, source, srctype, url, emptyok=False):
2294 def addchangegroup(self, source, srctype, url, emptyok=False):
2293 """Add the changegroup returned by source.read() to this repo.
2295 """Add the changegroup returned by source.read() to this repo.
2294 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2296 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2295 the URL of the repo where this changegroup is coming from.
2297 the URL of the repo where this changegroup is coming from.
2296
2298
2297 Return an integer summarizing the change to this repo:
2299 Return an integer summarizing the change to this repo:
2298 - nothing changed or no source: 0
2300 - nothing changed or no source: 0
2299 - more heads than before: 1+added heads (2..n)
2301 - more heads than before: 1+added heads (2..n)
2300 - fewer heads than before: -1-removed heads (-2..-n)
2302 - fewer heads than before: -1-removed heads (-2..-n)
2301 - number of heads stays the same: 1
2303 - number of heads stays the same: 1
2302 """
2304 """
2303 def csmap(x):
2305 def csmap(x):
2304 self.ui.debug("add changeset %s\n" % short(x))
2306 self.ui.debug("add changeset %s\n" % short(x))
2305 return len(cl)
2307 return len(cl)
2306
2308
2307 def revmap(x):
2309 def revmap(x):
2308 return cl.rev(x)
2310 return cl.rev(x)
2309
2311
2310 if not source:
2312 if not source:
2311 return 0
2313 return 0
2312
2314
2313 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2315 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2314
2316
2315 changesets = files = revisions = 0
2317 changesets = files = revisions = 0
2316 efiles = set()
2318 efiles = set()
2317
2319
2318 # write changelog data to temp files so concurrent readers will not see
2320 # write changelog data to temp files so concurrent readers will not see
2319 # inconsistent view
2321 # inconsistent view
2320 cl = self.changelog
2322 cl = self.changelog
2321 cl.delayupdate()
2323 cl.delayupdate()
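        # delayed changelog data stays in memory or in temporary files until
        # cl.finalize() near the end of this method writes it back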
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

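            # compute the head-count delta for the return value: new heads
            # that close a branch are deliberately not counted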
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

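            # the stream_out wire format is line-oriented up front: a status
            # line (0 = ok), then '<file count> <byte count>', then for each
            # file a '<name>\0<size>' header followed by size raw bytes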
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True