clfilter: ensure `rollback` is run unfiltered...
Pierre-Yves David
r17998:ec4c855d default
@@ -1,2648 +1,2649 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

def unfilteredmeth(orig):
    """decorate a method so that it is always run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
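
# Usage sketch: decorating a localrepository method with unfilteredmeth
# makes it operate on the full changelog even when invoked on a filtered
# view; the cache-management methods further down use exactly this pattern:
#
#     @unfilteredmeth
#     def updatebranchcache(self):
#         ...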

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

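# A localpeer is normally obtained via localrepository.peer() rather than
# constructed directly; a minimal sketch, assuming an existing repo object:
#
#     p = repo.peer()   # fresh instance; not cached, to avoid a ref cycle
#     if p.canpush():
#         p.lookup('tip')
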
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
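
    # A filtered view is expected to override unfiltered() to return the
    # underlying repository, roughly (a sketch, not code from this
    # changeset):
    #
    #     class filteredrepo(localrepository):
    #         def unfiltered(self):
    #             return self._unfilteredrepo
    #
    # unfilteredmeth relies on this so that operations such as `rollback`
    # (the subject of this changeset) always see every changeset.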

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that the hiddenrevs will need invalidation when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly a new extinct changeset)

        hidden changesets cannot have non-hidden descendants
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden
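
    # The query above selects changesets that are extinct (obsolete with
    # only obsolete descendants) and reachable from neither the working
    # directory parent ('.') nor any bookmark; roughly the same set can be
    # inspected interactively with:
    #
    #     hg log -r 'extinct() - ::(. + bookmark())'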

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
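
    # Both methods take a revset with format specifiers expanded by
    # revset.formatspec; a usage sketch (variable names hypothetical):
    #
    #     revs = repo.revs('branch(%s) and head()', branchname)
    #     for ctx in repo.set('%ld and not obsolete()', revs):
    #         ...
    #
    # where %s embeds a string and %ld a list of ints (revision numbers).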

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmeth
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    @unfilteredmeth # Until we get a smarter cache management
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded, so we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache

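    # Two paths above: when changelog filtering is active the persistent
    # branch-head cache (computed over the unfiltered repo) cannot be
    # trusted, so the map is rebuilt from the filtered iteration of self;
    # otherwise the cached self._branchcache is reused.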

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    @unfilteredmeth # Until we get a smarter cache management
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    @unfilteredmeth # Until we get a smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    @unfilteredmeth # Until we get a smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened).  Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs.  This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable.  If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped.  This code filters them out.  Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
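
    # Filters are read from the hgrc section named by `filter` ('encode'
    # and 'decode' below), mapping file patterns to commands; a sketch of
    # such a configuration:
    #
    #     [encode]
    #     *.txt = dos2unix
    #
    # A command of '!' disables filtering for the pattern, and a command
    # starting with a name registered through adddatafilter() is dispatched
    # to that Python filter function instead of an external command.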
872
872
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

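    # Transaction plumbing: opening a transaction writes a 'journal' file
    # in the store plus journal.* snapshots of dirstate, branch, desc,
    # bookmarks and phaseroots. aftertrans(renames) arranges for each
    # journal file to be renamed to its undo.* counterpart once the
    # transaction closes, which is what rollback() consumes later.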
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

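    # recover() and rollback() are complementary: recover() replays the
    # still-present 'journal' of a transaction that never completed, while
    # rollback() undoes the last *completed* transaction from the undo.*
    # files left behind when its journal was renamed on close.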
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

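    # undo.desc is written by _writejournal as "<len(repo)>\n<desc>\n"; a
    # transaction name that itself contains a newline (e.g. pull's
    # 'pull\n<url>') therefore shows up here as a third 'detail' line.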
    @unfilteredmeth # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback,
        # pass them to destroyed(), which will prevent the branchhead cache
        # from being invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has been.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
        delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

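    # Lock plumbing: lock() and wlock() hold only weak references to their
    # lock objects, so a lock is released as soon as the caller's last
    # strong reference goes away; re-entering while held just bumps the
    # existing lock via l.lock().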
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

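    # _filecommit stores one file revision with 'linkrev' pointing at the
    # changelog entry about to be created, and returns the filenode to
    # record in the manifest (or the reused parent filenode when the file
    # content did not change).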
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

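    # Note: 'changes' below is the 7-tuple returned by status():
    # (modified, added, removed, deleted, unknown, ignored, clean),
    # indexed positionally as changes[0] through changes[6].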
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

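        # Bind node and parents as default arguments so the hook still sees
        # this commit's values when it fires after the lock is released
        # (via _afterlock, possibly much later).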
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmeth
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

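    # branches(nodes): from each starting node, follow first parents until
    # a merge or the root is reached, yielding (start, end-of-linear-run,
    # p1, p2) tuples; this serves the legacy 'branches' wire command.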
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

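    # between(pairs): for each (top, bottom) pair, sample the first-parent
    # chain at exponentially growing distances (1, 2, 4, 8, ...) from top;
    # the old discovery protocol uses this to bisect toward a common
    # ancestor in O(log n) round trips.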
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

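    # pull's transaction name embeds the remote URL after a newline, so
    # journal.desc records 'pull' as the rollback description and the
    # password-stripped URL as the detail line (see _rollback above).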
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you will break future
        # useful rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

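    # A note on the pull sequence above (a reading of the code, not extra
    # behavior): everything runs under the local lock, and the transaction
    # is opened lazily -- first when a changegroup actually has to be
    # added, and again if remote obsolete markers arrive without one.
    # Phase synchronization runs even when no changesets were fetched,
    # which is why a pull reporting "no changes found" can still turn
    # local draft changesets public.
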
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

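    # A minimal sketch of how an extension might use this hook point
    # (hypothetical extension code, not part of this module):
    #
    #     def reposetup(ui, repo):
    #         class vetoingrepo(repo.__class__):
    #             def checkpush(self, force, revs):
    #                 super(vetoingrepo, self).checkpush(force, revs)
    #                 if not force:
    #                     raise util.Abort('pushing requires --force here')
    #         repo.__class__ = vetoingrepo
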
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if self.obsstore:
                            # messages are defined here for the 80 char limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are about to push and there is at least
                            # one obsolete or unstable changeset in missing,
                            # then at least one of the missing heads will be
                            # obsolete or unstable. So checking heads only
                            # is ok.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # all-out push failed, synchronize on all common heads
                    cheads = outgoing.commonheads
                else:
                    # We want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots is not a
                    # XXX strict set of roots; we may want to ensure it is,
                    # XXX but that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

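    # The push pipeline above, in order: discovery (findcommonincoming,
    # findcommonoutgoing), safety checks (obsolete/unstable/bumped
    # missing heads, then discovery.checkheads), changegroup transfer
    # (unbundle or addchangegroup, depending on remote capabilities),
    # phase synchronization over pushkey, obsolete marker exchange, and
    # finally bookmark updates, which run after the local lock has been
    # released.
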
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

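    # Note: cl.nodesbetween() returns a (csets, bases, heads) triple, so
    # the rebinding above also normalizes the caller-supplied bases and
    # heads; the common set is then derived from the ancestors of the
    # normalized bases.
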
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

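    # Call-chain summary: getbundle() normalizes heads/common and defers
    # to getlocalbundle(), which defers to _changegroupsubset(); all three
    # ultimately produce the same changegroup stream and differ only in
    # how the outgoing set is specified.
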
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

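    # Wire-format note, as implemented by gengroup() above: a bundle10
    # changegroup is three sections concatenated -- changelog chunks,
    # manifest chunks, then one group per touched file introduced by
    # bundler.fileheader(name) -- terminated by bundler.close(). The
    # lookup() callback maps each revlog entry back to the changeset that
    # introduced it, which lets the receiving side rebuild linkrevs.
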
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

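    # Worked example of the return-value encoding above, with dh being
    # the head-count delta (branch-closing heads excluded): adding two
    # new heads returns 3 (dh=2 -> dh+1), changing nothing returns 1
    # (dh=0 -> dh+1), and merging away one head returns -2 (dh=-1 ->
    # dh-1). Zero is reserved for "nothing changed or no source".
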
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            #                    requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

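    # Stream-clone wire format, as parsed above: one status line holding
    # an integer (0 = ok, 1 = operation forbidden, 2 = remote locking
    # failed), one "<total_files> <total_bytes>" line, then for each file
    # a "<name>\0<size>" header followed by exactly <size> bytes of raw
    # store data. Filenames containing '\n' or '\r' are unsupported.
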
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

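    # Decision summary for clone(): streaming is only attempted for full
    # clones (no explicit heads). A plain 'stream' capability implies the
    # remote serves revlogv1; otherwise 'streamreqs' advertises the
    # remote's format requirements and streaming is used only when we
    # support all of them. Everything else falls back to pull().
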
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

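# aftertrans() deliberately copies the (src, dest) pairs into a plain
# list and returns a closure holding no reference to the repository or
# transaction objects; as the comment above says, this keeps destructors
# runnable by avoiding circular references.
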
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

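# For example, undoname('.hg/store/journal.phaseroots') returns
# '.hg/store/undo.phaseroots'. Transaction journal files are renamed to
# their 'undo' counterparts after a successful transaction so that a
# later rollback can restore the previous state.
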
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True