clfilter: ensure changeset creation in the repo is run unfiltered...
Pierre-Yves David
r18000:f9459bcd default
@@ -1,2651 +1,2654 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)
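
# Note: properties defined with @storecache below (changelog, manifest,
# phaseroots, obsstore) work like @filecache properties, except that
# join() resolves the watched file inside .hg/store via sjoin().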

def unfilteredmeth(orig):
    """decorate a method so that it is always run on the unfiltered version
    of the repository"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
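
# Illustrative use: later in this file, methods such as _tag() are
# declared with this decorator, e.g.
#
#     @unfilteredmeth
#     def _tag(self, names, node, message, local, user, date, extra={}):
#         ...
#
# so they always operate on repo.unfiltered() and see every changeset,
# even when called through a filtered repository.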

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
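        # The requires file is plain text, one requirement per line; a
        # freshly created repo typically contains (illustrative):
        #
        #     revlogv1
        #     store
        #     fncache
        #     dotencode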
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository.

        Intended to be overwritten by the filtered repo."""
        return self

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that hiddenrevs needs to be invalidated when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly a new extinct changeset)

        Hidden changesets cannot have non-hidden descendants.
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
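            # That is: all extinct changesets, minus the ancestors of the
            # working directory parent ('.') and of every bookmark, so
            # nothing still reachable this way gets hidden.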
            hidden.update(self.revs(hiddenquery))
        return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
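        # validate() guards against a dirstate parent that is missing from
        # the changelog (e.g. after a strip): warn once, then fall back to
        # nullid rather than aborting.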
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
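
    # Illustrative calls to the two revset helpers above (the expressions
    # are examples, not part of this module):
    #
    #     repo.revs('branch(%s) and not merge()', 'default')
    #     for ctx in repo.set('%ld::', revnums):
    #         ...
    #
    # revset.formatspec quotes each argument according to its format
    # character (%s string, %d int, %ld list of ints, %ln list of nodes).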

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmeth
    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
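            # Append '<hex node> <tag name>' lines at end of file; when a
            # tag of a known type already exists, its previous value is
            # written first so the tag's history is kept in the file.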
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    @unfilteredmeth # Until we get a smarter cache management
    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded; we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    @unfilteredmeth # Until we get a smarter cache management
    def _readbranchcache(self):
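        # Expected cache/branchheads layout (illustrative): a first line
        # with the cached tip '<hex node> <rev>', then one
        # '<hex node> <branch label>' line per branch head:
        #
        #     5fd959a05272... 1200
        #     5fd959a05272... default
        #     d1b2c677e59f... stable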
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    @unfilteredmeth # Until we get a smarter cache management
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    @unfilteredmeth # Until we get a smarter cache management
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
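        # (in that graph, 1 is no longer a head of branch a because 3
        # descends from it, yet 1 is not a parent of 3, so a parents-only
        # check would miss it)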
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
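        # A node is reported known only if it exists locally and is not
        # secret; secret changesets must stay invisible to discovery.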
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
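        # Filter patterns come from the matching hgrc section; each entry
        # maps a file pattern to a shell command or a registered data
        # filter, e.g. (illustrative):
        #
        #     [encode]
        #     *.txt = dos2unix
        #
        #     [decode]
        #     *.txt = unix2dos
        #
        # A command of '!' disables filtering for that pattern.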
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
911
911
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

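    # Illustrative usage sketch (not part of the original module): callers
    # elsewhere in this file, such as commitctx() and pull() below, drive
    # transaction() with a close()/release() pattern like the following,
    # where 'repo' stands for a hypothetical localrepository instance:
    #
    #     tr = repo.transaction("some-operation")
    #     try:
    #         # ... write to the store through tr ...
    #         tr.close()      # commit the transaction
    #     finally:
    #         tr.release()    # rolls back unless close() was reached
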
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmeth # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self.unfiltered()._branchcache = None # in UTF-8
        self.unfiltered()._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

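    # Illustrative sketch (hypothetical 'repo' variable): commit() below
    # registers its "commit" hook through _afterlock() so the hook only
    # fires once the repository lock is released, or immediately when no
    # lock is currently held:
    #
    #     def notify():
    #         repo.ui.status("lock released\n")
    #     repo._afterlock(notify)
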
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

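    # Illustrative sketch of the locking discipline used by commit() and
    # rollback() in this file: take the wlock before the store lock, and
    # release both in a finally block ('repo' is a hypothetical
    # localrepository; release() comes from the lock module import above):
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         # ... modify working copy and store ...
    #     finally:
    #         release(lock, wlock)
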
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmeth
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

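    # Illustrative sketch of a minimal commit() caller, assuming a
    # hypothetical 'repo' instance and using matchmod from this module to
    # restrict the commit to one file; the return value is the new
    # changeset node, or None when there is nothing to commit:
    #
    #     m = matchmod.match(repo.root, '', ['foo.txt'])
    #     node = repo.commit(text="fix foo", user="alice", match=m)
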
    @unfilteredmeth
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmeth
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes.
        This will enable the code to update the branchheads cache, rather
        than having future code decide it's invalid and regenerating it
        from scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

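    # Illustrative sketch ('repo' hypothetical): status() returns a 7-tuple
    # of sorted file lists; clean files are only collected when clean=True:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(clean=True)
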
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

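    # Illustrative sketch ('repo' hypothetical): print the open heads of
    # the "default" branch, newest first as documented above, using short()
    # from the node module imported at the top of this file:
    #
    #     for head in repo.branchheads('default'):
    #         repo.ui.status(short(head) + "\n")
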
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing: all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

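    # A minimal caller sketch (assumes a configured ui and a reachable peer;
    # the URL is illustrative only):
    #
    #   from mercurial import hg
    #   remote = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #   result = repo.pull(remote)                   # 0 when no changes found
    #   result = repo.pull(remote, heads=[node], force=True)
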
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty there is nothing obsolete,
                        # so skip the iteration
                        if self.obsstore:
                            # these messages are here for 80-char limit reasons
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If there is at least one obsolete or unstable
                            # changeset in missing, at least one of the
                            # missing heads will be obsolete or unstable.
                            # So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly made of roots; we may want to ensure it is,
                    # XXX but that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

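    # Interpreting push() return values per the docstring above (sketch;
    # assumes `repo` and `remote` are already set up):
    #
    #   ret = repo.push(remote)
    #   if ret is None:
    #       repo.ui.status('nothing to push\n')
    #   elif ret == 0:
    #       repo.ui.warn('push failed (HTTP error)\n')
    #   else:
    #       pass  # head-count delta, as described by addchangegroup()
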
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

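    # In revset terms (informal): the changesets bundled above are
    # "bases::heads", i.e. descendants of any base that are also ancestors of
    # any head, with the strict ancestors of the bases treated as already
    # known by the recipient.
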
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

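    # Equivalent revset (informal): getbundle produces "::heads - ::common",
    # so a caller that already has `common` receives exactly the changesets
    # it is missing on the way to `heads`.
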
    @unfilteredmeth
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

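    # Wire shape produced above (informal): a changegroup is three
    # back-to-back chunk groups (changelog, manifests, then one filelog group
    # per changed file, each prefixed by bundler.fileheader(fname)),
    # terminated by bundler.close().
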
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmeth
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    @unfilteredmeth
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

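    # Worked example of the return-code arithmetic above: with 2 heads before
    # and 3 after, dh == 1 and the result is dh + 1 == 2; losing a head gives
    # dh == -1 and a result of dh - 1 == -2; dh == 0 yields 1. The result is
    # therefore never 0, so callers can tell "something landed" apart from
    # the 0 returned for an empty source.
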
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

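    # Wire format consumed above (informal sketch of the stream_out reply):
    #
    #   <status>\n                      # 0 ok, 1 forbidden, 2 lock failed
    #   <total_files> <total_bytes>\n
    #   then, total_files times:
    #     <store-encoded name>\0<size>\n
    #     <size bytes of raw revlog data>
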
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

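    # Negotiation summary for clone() (informal): a raw stream is used only
    # when no head subset was requested and the server advertises 'stream'
    # (plain revlogv1) or a 'streamreqs' set we fully support; everything
    # else falls back to pull().
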
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

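    # Usage sketch (values illustrative): the namespaces exercised elsewhere
    # in this file are 'phases', 'bookmarks', 'namespaces' and, when enabled,
    # 'obsolete':
    #
    #   repo.listkeys('bookmarks')                 # -> {name: hex node}
    #   repo.pushkey('bookmarks', name, oldhex, newhex)  # truthy on success
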
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True