clfilter: ensure that tag logic runs unfiltered...
Pierre-Yves David
r17996:b3af182a default
@@ -1,2645 +1,2646 @@ mercurial/localrepo.py
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)
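
# Illustrative sketch (not from this changeset): filecache validates a
# cached property against the stat of join(obj, fname); storecache only
# redirects that path into the store.  A hypothetical use:
#
#     @storecache('00changelog.i')      # watches .hg/store/00changelog.i
#     def changelog(self):
#         return changelog.changelog(self.sopener)
#
# whereas a plain @filecache('dirstate') watches .hg/dirstate.
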
def unfilteredmeth(orig):
    """decorate a method that must always run on an unfiltered repository"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
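
# Illustrative example (hypothetical method, not from this changeset):
# a method decorated with unfilteredmeth always sees hidden changesets,
# whichever filtered view it is invoked through:
#
#     class somerepo(localrepository):
#         @unfilteredmeth
#         def countall(self):
#             return len(self)   # effectively len(self.unfiltered())
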
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phases if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
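
# Illustrative walk-through (hypothetical paths, not from this
# changeset): for subpath 'sub/repo/file.txt' with
# ctx.substate = {'sub/repo': ...}, the loop above tries progressively
# shorter prefixes:
#
#     'sub/repo/file.txt'  -> not in substate, pop 'file.txt'
#     'sub/repo'           -> in substate but not the full path, so it
#                             recurses into the subrepo:
#                             ctx.sub('sub/repo').checknested('file.txt')
#
# Only an exact match between prefix and normsubpath returns True
# directly.
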
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository

        Intended to be overwritten by the filtered repo."""
        return self

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that the hiddenrevs will need invalidation when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly new extinct changeset)

        hidden changesets cannot have non-hidden descendants
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden
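
# Illustrative note (not from this changeset): the revset above reads as
# "extinct changesets not reachable from the working parent or any
# bookmark".  A hypothetical visibility check built on it:
#
#     def _example_isvisible(repo, rev):
#         return rev not in repo.hiddenrevs
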
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
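
# Illustrative examples (hypothetical revsets, not from this changeset):
# revset.formatspec escapes each argument, so user-controlled values can
# be spliced in safely:
#
#     repo.revs('ancestors(%d)', 42)                 # by revision number
#     repo.revs('branch(%s) - merge()', 'default')   # by branch name
#     [ctx.hex() for ctx in repo.set('heads(%s)', 'default')]
#
# revs() returns revision numbers; set() yields changectx objects.
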
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmeth
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
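
# Illustrative call (hypothetical `node`, not from this changeset):
#
#     repo.tag('v1.0', node, 'Added tag v1.0', local=False,
#              user=None, date=None)
#
# A local tag only appends to .hg/localtags; a global one rewrites
# .hgtags and commits it.  Since _tag is now @unfilteredmeth, the
# pretag/tag hooks and the tag commit always run against the unfiltered
# repository, which is the point of this changeset.
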
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
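
# Illustrative note (not from this changeset): when changelog filtering
# is active, the cached tag set may reference hidden changesets, so
# tags() recomputes from _findtags() instead of trusting _tagscache; in
# both paths, tags pointing at unknown nodes are dropped.  By
# construction, the following holds on any repository:
#
#     repo.tags()['tip'] == repo.changelog.tip()
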
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    @unfilteredmeth # Until we get a smarter cache management
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded, so we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    @unfilteredmeth # Until we get a smarter cache management
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    @unfilteredmeth # Until we get a smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    @unfilteredmeth # Until we get a smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened).  Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs.  This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable.  If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped.  This code filters them out.  Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]
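
# Illustrative toy run (hypothetical history, not from this changeset):
# with existing heads [2, 5] on a branch and a new candidate 7 that
# descends from 5 (while 2 sits on a separate line of development), the
# pruning loop sees
#
#     bheadrevs = [2, 5, 7]
#     ancestors([7], stoprev=2) contains 5  ->  bheadrevs = [2, 7]
#
# 5 is dropped because candidate 7 descends from it; 2 survives as a
# head because it is not an ancestor of 7.
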
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
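
# Illustrative note (not from this changeset): known() backs discovery;
# secret changesets are reported as unknown so peers never pull them.
# Hypothetical call and result shape:
#
#     repo.known([somenode, missingnode])   # -> [True, False]
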
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

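    # Usage sketch (hypothetical, not part of the original file): callers of
    # transaction() are expected to hold the store lock, close() the
    # transaction on success, and release() it unconditionally, as
    # commitctx() and pull() below do:
    #
    #   lock = self.lock()
    #   tr = None
    #   try:
    #       tr = self.transaction("some-operation")
    #       # ... write to the store through tr ...
    #       tr.close()
    #   finally:
    #       if tr:
    #           tr.release()
    #       lock.release()
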
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

        self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

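    # Lock-ordering sketch (not part of the original file): when both locks
    # are needed, acquire wlock before lock and release them in reverse
    # order, as rollback() above does:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       # ... modify working copy and store ...
    #   finally:
    #       release(lock, wlock)
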
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

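    # Usage sketch (hypothetical, not part of the original file):
    #
    #   m = matchmod.match(repo.root, '', ['foo.py'])
    #   node = repo.commit(text="fix foo", user="alice", match=m)
    #   # 'node' is the new changeset node, or None if nothing changed
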
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

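    # Note (an explanatory sketch, not part of the original file): the
    # 'pending' lambda passed to the pretxncommit hook above lets the hook
    # see the not-yet-finalized revision. It calls changelog.writepending()
    # to expose the pending data and returns self.root when something was
    # written, so hook handlers know where to look; changelog.finalize()
    # then makes the revision permanent once the hook accepts it.
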
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

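    # Usage sketch (hypothetical, not part of the original file):
    #
    #   m = matchmod.match(repo.root, '', ['**.py'])
    #   for f in repo.walk(m):              # files in the working directory
    #       print f
    #   for f in repo.walk(m, node='tip'):  # files in a given changeset
    #       print f
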
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

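    # Usage sketch (hypothetical, not part of the original file): the
    # return value is a 7-tuple of sorted file lists:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True)
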
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

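    # Usage sketch (hypothetical, not part of the original file):
    #
    #   for h in repo.branchheads('default', closed=True):
    #       print short(h)   # heads of branch 'default', newest first
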
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

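    # Note (an explanatory sketch, not part of the original file): for each
    # (top, bottom) pair, the loop above records a node only when the step
    # counter i equals f, doubling f each time, so each result list samples
    # the first-parent ancestry at distances 1, 2, 4, 8, ... from top. The
    # old discovery wire protocol uses this power-of-two sampling to narrow
    # down where two repositories diverge.
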
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible,
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset,
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

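    # Usage sketch (illustrative, not from the original source; assumes a
    # peer obtained elsewhere, e.g. via mercurial.hg.peer):
    #
    #   other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #   result = repo.pull(other)                  # pull everything new
    #   result = repo.pull(other, heads=[somenode])  # pull a subset
    #
    # The return value is the integer produced by addchangegroup() below
    # (0 for "no changes found", otherwise a head-count delta encoding).
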
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is false --> no obsolete markers,
                        # so skip the iteration
                        if self.obsstore:
                            # these messages are assigned here because of the
                            # 80-char line length limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are pushing and there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missing heads will be obsolete
                            # or unstable. So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

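    # Illustrative summary (restating the docstring above, not new
    # behavior): None -> nothing to push, 0 -> HTTP error, 1 -> pushed with
    # the remote head count unchanged (or outgoing changesets we refused to
    # push), and any other value follows the head-count encoding documented
    # on addchangegroup() below.
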
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

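    # Note (added for clarity, not in the upstream file): in revset terms
    # the bundle produced above covers roughly "::heads - ::common", i.e.
    # everything reachable from heads that is not already reachable from
    # the common nodes.
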
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

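    # Note (added for clarity, not in the upstream file): gengroup() above
    # emits one stream in a fixed order -- changelog chunks first, then
    # manifest chunks, then one group per touched file (each preceded by a
    # fileheader) -- and bundler.close() marks the end of the stream.
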
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

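    # Worked example (illustrative, not part of the original source): if a
    # pull adds changesets that create two new heads, dh == 2 and the
    # method returns 3; if it merges away one head, dh == -1 and it returns
    # -2; if the head count is unchanged, dh == 0 and it returns 1. The
    # value 0 is reserved for "nothing changed".
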
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            #                    requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

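    # Wire-format sketch (derived from the code above, not an authoritative
    # spec): the stream starts with a status line ("0" ok, "1" forbidden,
    # "2" remote lock failed), then a "<total_files> <total_bytes>" line,
    # then for each file a "<name>\0<size>" header line followed by exactly
    # <size> bytes of raw revlog data.
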
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

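    # Note (added for clarity, not in the upstream file): the message is
    # written to .hg/last-message.txt so that the text of an aborted commit
    # can be recovered, and the returned value is the repo-relative path to
    # that file, suitable for printing in an error hint.
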
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

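# Illustrative example (not part of the original source):
#   undoname('.hg/store/journal')             -> '.hg/store/undo'
#   undoname('.hg/store/journal.phaseroots')  -> '.hg/store/undo.phaseroots'
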
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True