clfilter: do not use tags cache if there are filtered changesets...
Pierre-Yves David
r17715:21c50348 default
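The key hunk in this diff changes localrepository.tags(): when self.changelog.filteredrevs is non-empty, the method recomputes tags via _findtags() instead of reading the cached _tagscache.tags, presumably because that cache is computed without regard to the filtered view. This mirrors the existing guard in branchmap() further down in the same file, which likewise bypasses its cache when filteredrevs is set. A minimal sketch of the resulting control flow, for orientation only (the authoritative code is the hunk itself):

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # Filtered changesets make the cached tags unreliable, so
            # recompute from scratch for this call; the cache is skipped,
            # not invalidated.
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                self.changelog.rev(v)  # drop tags pointing to unknown nodes
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t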
@@ -1,2616 +1,2620 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import peer, changegroup, subrepo, discovery, pushkey, obsolete
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding, base85
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 filecache = scmutil.filecache

 class storecache(filecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)

 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=MODERNCAPS):
         peer.peerrepository.__init__(self)
         self._repo = repo
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats

     def close(self):
         self._repo.close()

     def _capabilities(self):
         return self._caps

     def local(self):
         return self._repo

     def canpush(self):
         return True

     def url(self):
         return self._repo.url()

     def lookup(self, key):
         return self._repo.lookup(key)

     def branchmap(self):
         return discovery.visiblebranchmap(self._repo)

     def heads(self):
         return discovery.visibleheads(self._repo)

     def known(self, nodes):
         return self._repo.known(nodes)

     def getbundle(self, source, heads=None, common=None):
         return self._repo.getbundle(source, heads=heads, common=common)

     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.

     def lock(self):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
         return self._repo.addchangegroup(cg, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)

 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=LEGACYCAPS)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def between(self, pairs):
         return self._repo.between(pairs)

     def changegroup(self, basenodes, source):
         return self._repo.changegroup(basenodes, source)

     def changegroupsubset(self, bases, heads, source):
         return self._repo.changegroupsubset(bases, heads, source)

 class localrepository(object):

     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta'))
     requirements = ['revlogv1']

     def _baserequirements(self, create):
         return self.requirements[:]

     def __init__(self, baseui, path=None, create=False):
         self.wvfs = scmutil.vfs(path, expand=True)
         self.wopener = self.wvfs
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.vfs = scmutil.vfs(self.path)
         self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         # A list of callback to shape the phase if no data were found.
         # Callback are in the form: func(repo, roots) --> processed root.
         # This list it to be filled by extension during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if not self.vfs.isdir():
             if create:
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
                 requirements = self._baserequirements(create)
                 if self.ui.configbool('format', 'usestore', True):
                     self.vfs.mkdir("store")
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.vfs, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()

         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sopener = self.svfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()


         self._branchcache = None
         self._branchcachetip = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}

     def close(self):
         pass

     def _restrictcapabilities(self, caps):
         return caps

     def _applyrequirements(self, requirements):
         self.requirements = requirements
         self.sopener.options = dict((r, 1) for r in requirements
                                     if r in self.openerreqs)

     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()

     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)

         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False

     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle

     @filecache('bookmarks')
     def _bookmarks(self):
         return bookmarks.read(self)

     @filecache('bookmarks.current')
     def _bookmarkcurrent(self):
         return bookmarks.readcurrent(self)

     def _writebookmarks(self, marks):
         bookmarks.write(self)

     def bookmarkheads(self, bookmark):
         name = bookmark.split('@', 1)[0]
         heads = []
         for mark, n in self._bookmarks.iteritems():
             if mark.split('@', 1)[0] == name:
                 heads.append(n)
         return heads

     @storecache('phaseroots')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)

     @storecache('obsstore')
     def obsstore(self):
         store = obsolete.obsstore(self.sopener)
         if store and not obsolete._enabled:
             # message is rare enough to not be translated
             msg = 'obsolete feature not enabled but %i markers found!\n'
             self.ui.warn(msg % len(list(store)))
         return store

     @propertycache
     def hiddenrevs(self):
         """hiddenrevs: revs that should be hidden by command and tools

         This set is carried on the repo to ease initialization and lazy
         loading; it'll probably move back to changelog for efficiency and
         consistency reasons.

         Note that the hiddenrevs will needs invalidations when
         - a new changesets is added (possible unstable above extinct)
         - a new obsolete marker is added (possible new extinct changeset)

         hidden changesets cannot have non-hidden descendants
         """
         hidden = set()
         if self.obsstore:
             ### hide extinct changeset that are not accessible by any mean
             hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
             hidden.update(self.revs(hiddenquery))
         return hidden

     @storecache('00changelog.i')
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         return c

     @storecache('00manifest.i')
     def manifest(self):
         return manifest.manifest(self.sopener)

     @filecache('dirstate')
     def dirstate(self):
         warned = [0]
         def validate(node):
             try:
                 self.changelog.rev(node)
                 return node
             except error.LookupError:
                 if not warned[0]:
                     warned[0] = True
                     self.ui.warn(_("warning: ignoring unknown"
                                    " working parent %s!\n") % short(node))
                 return nullid

         return dirstate.dirstate(self.opener, self.ui, self.root, validate)

     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)

     def __contains__(self, changeid):
         try:
             return bool(self.lookup(changeid))
         except error.RepoLookupError:
             return False

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         return iter(self.changelog)

     def revs(self, expr, *args):
         '''Return a list of revisions matching the given revset'''
         expr = revset.formatspec(expr, *args)
         m = revset.match(None, expr)
         return [r for r in m(self, list(self))]

     def set(self, expr, *args):
         '''
         Yield a context for each matching revision, after doing arg
         replacement via revset.formatspec
         '''
         for r in self.revs(expr, *args):
             yield self[r]

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)

     tag_disallowed = ':\r\n'

     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)

         branches = self.branchmap()
         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)
             if name in branches:
                 self.ui.warn(_("warning: tag %s conflicts with existing"
                                " branch name\n") % name)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if (self._tagscache.tagtypes and
                     name in self._tagscache.tagtypes):
                     old = self.tags().get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError, e:
             if e.errno != errno.ENOENT:
                 raise
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()

         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)

         fp.close()

         self.invalidatecaches()

         if '.hgtags' not in self.dirstate:
             self[None].add(['.hgtags'])

         m = matchmod.exact(self.root, '', ['.hgtags'])
         tagnode = self.commit(message, user, date, extra=extra, match=m)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         if not local:
             for x in self.status()[:5]:
                 if '.hgtags' in x:
                     raise util.Abort(_('working copy of .hgtags is changed '
                                        '(please commit .hgtags manually)'))

         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)

     @propertycache
     def _tagscache(self):
         '''Returns a tagscache object that contains various tags related
         caches.'''

         # This simplifies its cache management by having one decorated
         # function (this one) and the rest simply fetch things from it.
         class tagscache(object):
             def __init__(self):
                 # These two define the set of tags for this repository. tags
                 # maps tag name to node; tagtypes maps tag name to 'global' or
                 # 'local'. (Global tags are defined by .hgtags across all
                 # heads, and local tags are defined in .hg/localtags.)
                 # They constitute the in-memory cache of tags.
                 self.tags = self.tagtypes = None

                 self.nodetagscache = self.tagslist = None

         cache = tagscache()
         cache.tags, cache.tagtypes = self._findtags()

         return cache

     def tags(self):
         '''return a mapping of tag to node'''
         t = {}
-        for k, v in self._tagscache.tags.iteritems():
+        if self.changelog.filteredrevs:
+            tags, tt = self._findtags()
+        else:
+            tags = self._tagscache.tags
+        for k, v in tags.iteritems():
             try:
                 # ignore tags to unknown nodes
                 self.changelog.rev(v)
                 t[k] = v
             except (error.LookupError, ValueError):
                 pass
         return t

     def _findtags(self):
         '''Do the hard work of finding tags. Return a pair of dicts
         (tags, tagtypes) where tags maps tag name to node, and tagtypes
         maps tag name to a string like \'global\' or \'local\'.
         Subclasses or extensions are free to add their own tags, but
         should be aware that the returned dicts will be retained for the
         duration of the localrepo object.'''

         # XXX what tagtype should subclasses/extensions use? Currently
         # mq and bookmarks add tags, but do not set the tagtype at all.
         # Should each extension invent its own tag type? Should there
         # be one tagtype for all such "virtual" tags? Or is the status
         # quo fine?

         alltags = {} # map tag name to (node, hist)
         tagtypes = {}

         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

         # Build the return dicts. Have to re-encode tag names because
         # the tags module always uses UTF-8 (in order not to lose info
         # writing to the cache), but the rest of Mercurial wants them in
         # local encoding.
         tags = {}
         for (name, (node, hist)) in alltags.iteritems():
             if node != nullid:
                 tags[encoding.tolocal(name)] = node
         tags['tip'] = self.changelog.tip()
         tagtypes = dict([(encoding.tolocal(name), value)
                          for (name, value) in tagtypes.iteritems()])
         return (tags, tagtypes)

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''

         return self._tagscache.tagtypes.get(tagname)

     def tagslist(self):
         '''return a list of tags ordered by revision'''
         if not self._tagscache.tagslist:
             l = []
             for t, n in self.tags().iteritems():
                 r = self.changelog.rev(n)
                 l.append((r, t, n))
             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

         return self._tagscache.tagslist

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self._tagscache.nodetagscache:
             nodetagscache = {}
             for t, n in self._tagscache.tags.iteritems():
                 nodetagscache.setdefault(n, []).append(t)
             for tags in nodetagscache.itervalues():
                 tags.sort()
             self._tagscache.nodetagscache = nodetagscache
         return self._tagscache.nodetagscache.get(node, [])

     def nodebookmarks(self, node):
         marks = []
         for bookmark, n in self._bookmarks.iteritems():
             if n == node:
                 marks.append(bookmark)
         return sorted(marks)

     def _branchtags(self, partial, lrev):
         # TODO: rename this function?
         tiprev = len(self) - 1
         if lrev != tiprev:
             ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
             self._updatebranchcache(partial, ctxgen)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)

         return partial

     def updatebranchcache(self):
         tip = self.changelog.tip()
         if self._branchcache is not None and self._branchcachetip == tip:
             return

         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._branchcache

         self._branchtags(partial, lrev)
         # this private cache holds all heads (not just the branch tips)
         self._branchcache = partial

     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         if self.changelog.filteredrevs:
             # some changeset are excluded we can't use the cache
             branchmap = {}
             self._updatebranchcache(branchmap, (self[r] for r in self))
             return branchmap
         else:
             self.updatebranchcache()
             return self._branchcache


     def _branchtip(self, heads):
         '''return the tipmost branch head in heads'''
         tip = heads[-1]
         for h in reversed(heads):
             if not self[h].closesbranch():
                 tip = h
                 break
         return tip

     def branchtip(self, branch):
         '''return the tip node for a given branch'''
         if branch not in self.branchmap():
             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
         return self._branchtip(self.branchmap()[branch])

     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             bt[bn] = self._branchtip(heads)
         return bt

663 def _readbranchcache(self):
667 def _readbranchcache(self):
664 partial = {}
668 partial = {}
665 try:
669 try:
666 f = self.opener("cache/branchheads")
670 f = self.opener("cache/branchheads")
667 lines = f.read().split('\n')
671 lines = f.read().split('\n')
668 f.close()
672 f.close()
669 except (IOError, OSError):
673 except (IOError, OSError):
670 return {}, nullid, nullrev
674 return {}, nullid, nullrev
671
675
672 try:
676 try:
673 last, lrev = lines.pop(0).split(" ", 1)
677 last, lrev = lines.pop(0).split(" ", 1)
674 last, lrev = bin(last), int(lrev)
678 last, lrev = bin(last), int(lrev)
675 if lrev >= len(self) or self[lrev].node() != last:
679 if lrev >= len(self) or self[lrev].node() != last:
676 # invalidate the cache
680 # invalidate the cache
677 raise ValueError('invalidating branch cache (tip differs)')
681 raise ValueError('invalidating branch cache (tip differs)')
678 for l in lines:
682 for l in lines:
679 if not l:
683 if not l:
680 continue
684 continue
681 node, label = l.split(" ", 1)
685 node, label = l.split(" ", 1)
682 label = encoding.tolocal(label.strip())
686 label = encoding.tolocal(label.strip())
683 if not node in self:
687 if not node in self:
684 raise ValueError('invalidating branch cache because node '+
688 raise ValueError('invalidating branch cache because node '+
685 '%s does not exist' % node)
689 '%s does not exist' % node)
686 partial.setdefault(label, []).append(bin(node))
690 partial.setdefault(label, []).append(bin(node))
687 except KeyboardInterrupt:
691 except KeyboardInterrupt:
688 raise
692 raise
689 except Exception, inst:
693 except Exception, inst:
690 if self.ui.debugflag:
694 if self.ui.debugflag:
691 self.ui.warn(str(inst), '\n')
695 self.ui.warn(str(inst), '\n')
692 partial, last, lrev = {}, nullid, nullrev
696 partial, last, lrev = {}, nullid, nullrev
693 return partial, last, lrev
697 return partial, last, lrev
694
698
695 def _writebranchcache(self, branches, tip, tiprev):
699 def _writebranchcache(self, branches, tip, tiprev):
696 try:
700 try:
697 f = self.opener("cache/branchheads", "w", atomictemp=True)
701 f = self.opener("cache/branchheads", "w", atomictemp=True)
698 f.write("%s %s\n" % (hex(tip), tiprev))
702 f.write("%s %s\n" % (hex(tip), tiprev))
699 for label, nodes in branches.iteritems():
703 for label, nodes in branches.iteritems():
700 for node in nodes:
704 for node in nodes:
701 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
705 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
702 f.close()
706 f.close()
703 except (IOError, OSError):
707 except (IOError, OSError):
704 pass
708 pass
705
709
706 def _updatebranchcache(self, partial, ctxgen):
710 def _updatebranchcache(self, partial, ctxgen):
707 """Given a branchhead cache, partial, that may have extra nodes or be
711 """Given a branchhead cache, partial, that may have extra nodes or be
708 missing heads, and a generator of nodes that are at least a superset of
712 missing heads, and a generator of nodes that are at least a superset of
709 heads missing, this function updates partial to be correct.
713 heads missing, this function updates partial to be correct.
710 """
714 """
711 # collect new branch entries
715 # collect new branch entries
712 newbranches = {}
716 newbranches = {}
713 for c in ctxgen:
717 for c in ctxgen:
714 newbranches.setdefault(c.branch(), []).append(c.node())
718 newbranches.setdefault(c.branch(), []).append(c.node())
715 # if older branchheads are reachable from new ones, they aren't
719 # if older branchheads are reachable from new ones, they aren't
716 # really branchheads. Note checking parents is insufficient:
720 # really branchheads. Note checking parents is insufficient:
717 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
721 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
718 for branch, newnodes in newbranches.iteritems():
722 for branch, newnodes in newbranches.iteritems():
719 bheads = partial.setdefault(branch, [])
723 bheads = partial.setdefault(branch, [])
720 # Remove candidate heads that no longer are in the repo (e.g., as
724 # Remove candidate heads that no longer are in the repo (e.g., as
721 # the result of a strip that just happened). Avoid using 'node in
725 # the result of a strip that just happened). Avoid using 'node in
722 # self' here because that dives down into branchcache code somewhat
726 # self' here because that dives down into branchcache code somewhat
723 # recursively.
727 # recursively.
724 bheadrevs = [self.changelog.rev(node) for node in bheads
728 bheadrevs = [self.changelog.rev(node) for node in bheads
725 if self.changelog.hasnode(node)]
729 if self.changelog.hasnode(node)]
726 newheadrevs = [self.changelog.rev(node) for node in newnodes
730 newheadrevs = [self.changelog.rev(node) for node in newnodes
727 if self.changelog.hasnode(node)]
731 if self.changelog.hasnode(node)]
728 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
732 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
729 # Remove duplicates - nodes that are in newheadrevs and are already
733 # Remove duplicates - nodes that are in newheadrevs and are already
730 # in bheadrevs. This can happen if you strip a node whose parent
734 # in bheadrevs. This can happen if you strip a node whose parent
731 # was already a head (because they're on different branches).
735 # was already a head (because they're on different branches).
732 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
736 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
733
737
734 # Starting from tip means fewer passes over reachable. If we know
738 # Starting from tip means fewer passes over reachable. If we know
735 # the new candidates are not ancestors of existing heads, we don't
739 # the new candidates are not ancestors of existing heads, we don't
736 # have to examine ancestors of existing heads
740 # have to examine ancestors of existing heads
737 if ctxisnew:
741 if ctxisnew:
738 iterrevs = sorted(newheadrevs)
742 iterrevs = sorted(newheadrevs)
739 else:
743 else:
740 iterrevs = list(bheadrevs)
744 iterrevs = list(bheadrevs)
741
745
742 # This loop prunes out two kinds of heads - heads that are
746 # This loop prunes out two kinds of heads - heads that are
743 # superseded by a head in newheadrevs, and newheadrevs that are not
747 # superseded by a head in newheadrevs, and newheadrevs that are not
744 # heads because an existing head is their descendant.
748 # heads because an existing head is their descendant.
745 while iterrevs:
749 while iterrevs:
746 latest = iterrevs.pop()
750 latest = iterrevs.pop()
747 if latest not in bheadrevs:
751 if latest not in bheadrevs:
748 continue
752 continue
749 ancestors = set(self.changelog.ancestors([latest],
753 ancestors = set(self.changelog.ancestors([latest],
750 bheadrevs[0]))
754 bheadrevs[0]))
751 if ancestors:
755 if ancestors:
752 bheadrevs = [b for b in bheadrevs if b not in ancestors]
756 bheadrevs = [b for b in bheadrevs if b not in ancestors]
753 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
757 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
754
758
755 # There may be branches that cease to exist when the last commit in the
759 # There may be branches that cease to exist when the last commit in the
756 # branch was stripped. This code filters them out. Note that the
760 # branch was stripped. This code filters them out. Note that the
757 # branch that ceased to exist may not be in newbranches because
761 # branch that ceased to exist may not be in newbranches because
758 # newbranches is the set of candidate heads, which when you strip the
762 # newbranches is the set of candidate heads, which when you strip the
759 # last commit in a branch will be the parent branch.
763 # last commit in a branch will be the parent branch.
760 for branch in partial.keys():
764 for branch in partial.keys():
761 nodes = [head for head in partial[branch]
765 nodes = [head for head in partial[branch]
762 if self.changelog.hasnode(head)]
766 if self.changelog.hasnode(head)]
763 if not nodes:
767 if not nodes:
764 del partial[branch]
768 del partial[branch]
765
769
766 def lookup(self, key):
770 def lookup(self, key):
767 return self[key].node()
771 return self[key].node()
768
772
769 def lookupbranch(self, key, remote=None):
773 def lookupbranch(self, key, remote=None):
770 repo = remote or self
774 repo = remote or self
771 if key in repo.branchmap():
775 if key in repo.branchmap():
772 return key
776 return key
773
777
774 repo = (remote and remote.local()) and remote or self
778 repo = (remote and remote.local()) and remote or self
775 return repo[key].branch()
779 return repo[key].branch()
776
780
777 def known(self, nodes):
781 def known(self, nodes):
778 nm = self.changelog.nodemap
782 nm = self.changelog.nodemap
779 pc = self._phasecache
783 pc = self._phasecache
780 result = []
784 result = []
781 for n in nodes:
785 for n in nodes:
782 r = nm.get(n)
786 r = nm.get(n)
783 resp = not (r is None or pc.phase(self, r) >= phases.secret)
787 resp = not (r is None or pc.phase(self, r) >= phases.secret)
784 result.append(resp)
788 result.append(resp)
785 return result
789 return result
786
790
787 def local(self):
791 def local(self):
788 return self
792 return self
789
793
790 def cancopy(self):
794 def cancopy(self):
791 return self.local() # so statichttprepo's override of local() works
795 return self.local() # so statichttprepo's override of local() works
792
796
793 def join(self, f):
797 def join(self, f):
794 return os.path.join(self.path, f)
798 return os.path.join(self.path, f)
795
799
796 def wjoin(self, f):
800 def wjoin(self, f):
797 return os.path.join(self.root, f)
801 return os.path.join(self.root, f)
798
802
799 def file(self, f):
803 def file(self, f):
800 if f[0] == '/':
804 if f[0] == '/':
801 f = f[1:]
805 f = f[1:]
802 return filelog.filelog(self.sopener, f)
806 return filelog.filelog(self.sopener, f)
803
807
804 def changectx(self, changeid):
808 def changectx(self, changeid):
805 return self[changeid]
809 return self[changeid]
806
810
807 def parents(self, changeid=None):
811 def parents(self, changeid=None):
808 '''get list of changectxs for parents of changeid'''
812 '''get list of changectxs for parents of changeid'''
809 return self[changeid].parents()
813 return self[changeid].parents()
810
814
811 def setparents(self, p1, p2=nullid):
815 def setparents(self, p1, p2=nullid):
812 copies = self.dirstate.setparents(p1, p2)
816 copies = self.dirstate.setparents(p1, p2)
813 if copies:
817 if copies:
814 # Adjust copy records, the dirstate cannot do it, it
818 # Adjust copy records, the dirstate cannot do it, it
815 # requires access to parents manifests. Preserve them
819 # requires access to parents manifests. Preserve them
816 # only for entries added to first parent.
820 # only for entries added to first parent.
817 pctx = self[p1]
821 pctx = self[p1]
818 for f in copies:
822 for f in copies:
819 if f not in pctx and copies[f] in pctx:
823 if f not in pctx and copies[f] in pctx:
820 self.dirstate.copy(copies[f], f)
824 self.dirstate.copy(copies[f], f)
821
825
822 def filectx(self, path, changeid=None, fileid=None):
826 def filectx(self, path, changeid=None, fileid=None):
823 """changeid can be a changeset revision, node, or tag.
827 """changeid can be a changeset revision, node, or tag.
824 fileid can be a file revision or node."""
828 fileid can be a file revision or node."""
825 return context.filectx(self, path, changeid, fileid)
829 return context.filectx(self, path, changeid, fileid)
826
830
827 def getcwd(self):
831 def getcwd(self):
828 return self.dirstate.getcwd()
832 return self.dirstate.getcwd()
829
833
830 def pathto(self, f, cwd=None):
834 def pathto(self, f, cwd=None):
831 return self.dirstate.pathto(f, cwd)
835 return self.dirstate.pathto(f, cwd)
832
836
833 def wfile(self, f, mode='r'):
837 def wfile(self, f, mode='r'):
834 return self.wopener(f, mode)
838 return self.wopener(f, mode)
835
839
836 def _link(self, f):
840 def _link(self, f):
837 return os.path.islink(self.wjoin(f))
841 return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
        if 'x' in flags:
            util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

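    # Example (hypothetical hgrc snippet) of what _loadfilter('encode')
    # parses: each [encode]/[decode] entry maps a pattern to a command.
    # '!' disables the pattern, a value starting with a registered
    # data-filter name (see adddatafilter) dispatches to that function,
    # and anything else is run as a shell command via util.filter:
    #
    #   [encode]
    #   **.txt = tempfile: dos2unix INFILE OUTFILE
    #   **.gz = !
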
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

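    # Typical usage sketch (the caller is assumed to hold the store
    # lock, and 'my-operation' is a made-up description): a transaction
    # must be closed explicitly and released in all cases; nested
    # transaction() calls reuse the running one via tr.nest().
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       ...              # write to the store through tr
    #       tr.close()       # commit the journal
    #   finally:
    #       tr.release()     # rolls back if close() was never reached
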
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

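    # Usage sketch: with dryrun=True only the rollback message is
    # printed and 0 is returned; nothing is undone.
    #
    #   repo.rollback(dryrun=True)   # preview only
    #   repo.rollback()              # actually undo the last transaction
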
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

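    # Usage sketch: defer work until the current store lock goes away,
    # or run it immediately if no lock is held ('myhook' is a made-up
    # hook name used purely for illustration).
    #
    #   repo._afterlock(lambda: repo.hook('myhook'))
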
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

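    # Lock-ordering sketch, mirroring callers such as rollback() and
    # commit() in this file: when both locks are needed, take wlock()
    # before lock() and release them in reverse order.
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...
    #   finally:
    #       release(lock, wlock)
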
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

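    # Usage sketch (hypothetical message and user): commit everything
    # modified in the working directory; None means nothing was
    # committed, otherwise the new changeset's node is returned.
    #
    #   node = repo.commit(text='fix parser', user='alice')
    #   if node is not None:
    #       repo.ui.status('created %s\n' % hex(node))
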
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit to its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

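    # Usage sketch: walk every file in the working directory (node=None)
    # with an always-matcher.
    #
    #   m = matchmod.always(repo.root, '')
    #   for fname in repo.walk(m):
    #       ...
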
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

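    # Usage sketch: the return value is a 7-tuple of sorted file lists,
    # in the order built above.
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True)
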
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

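    # Note on between(): for each (top, bottom) pair it samples the
    # first-parent chain at exponentially growing distances 1, 2, 4,
    # 8, ... from top, stopping at bottom; this is the sampling the
    # old-style 'between' discovery protocol command relies on.
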
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

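    # A minimal usage sketch (hedged; assumes `repo` is a localrepository
    # and `other` any peer, e.g. one obtained through hg.peer()):
    #
    #     result = repo.pull(other)             # pull everything
    #     result = repo.pull(other, heads=[n])  # pull ancestors of node n
    #
    # The integer result is whatever addchangegroup() returned, or 0 when
    # no changes were found.
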
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

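    # A hedged sketch of how an extension might hook checkpush(); the names
    # below are illustrative, not part of this file:
    #
    #     def checkpush(orig, repo, force, revs):
    #         if revs and not force:
    #             raise util.Abort('this repo only accepts full pushes')
    #         return orig(repo, force, revs)
    #     extensions.wrapfunction(localrepo.localrepository,
    #                             'checkpush', checkpush)
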
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes the local user can lock the remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes the local user cannot lock the remote repo
        # (new ssh servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if self.obsstore:
                            # these messages are defined here to respect the
                            # 80 char limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If we try to push while there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missing heads will be obsolete
                            # or unstable. So checking heads only is ok.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. Synchronize all common.
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public
                    # here.
                    # XXX Beware that the revset breaks if droots is not a
                    # XXX strict set of roots; we may want to ensure it is,
                    # XXX but that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

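    # Reading push() results, per the docstring above (illustrative only):
    #
    #     ret = repo.push(other)
    #     if ret is None: ...   # nothing to push
    #     elif ret == 0:  ...   # HTTP error reported by the remote
    #     else:           ...   # pushed; see addchangegroup() for the
    #                           # head-count encoding of the other values
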
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

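    # The three entry points above layer onto one another (sketch):
    #
    #     getbundle(heads, common)                 # wire-level arguments
    #       -> getlocalbundle(discovery.outgoing)  # precomputed common/missing
    #            -> _changegroupsubset(...)        # does the actual bundling
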
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

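    # The generator above streams a bundle in three passes, each driven by
    # the shared lookup() callback: changelog chunks first (which also
    # records the manifests and files each changeset touches), then the
    # needed manifest chunks, then one group per changed file, and finally
    # a closing chunk. prune() drops nodes whose linkrev falls on the
    # common side, so nothing the receiver already has is re-sent.
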
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

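    # Both bundlers above honor the same configuration knob; a hedged hgrc
    # example:
    #
    #     [bundle]
    #     reorder = auto   # or any boolean accepted by util.parsebool()
    #
    # 'auto' (the default) becomes reorder=None, which lets the revlog
    # layer pick its default; an explicit boolean forces reordering on
    # or off.
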
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

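    # Worked examples of the return encoding above ("never return 0"):
    # a head-count delta dh of +2 returns 3, 0 returns 1, and -1 returns
    # -2. Callers can therefore distinguish "pushed, heads unchanged" (1)
    # from an HTTP error (0), and recover dh as ret - 1 for positive
    # results or ret + 1 for negative ones.
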
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

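    # The stream parsed above has this shape (a sketch, not a formal
    # grammar):
    #
    #     <status>\n                  # 0 = ok, 1 = forbidden, 2 = lock failed
    #     <total_files> <total_bytes>\n
    #     repeated total_files times:
    #         <store-encoded name>\0<size>\n
    #         <size raw bytes of revlog data>
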
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

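    # clone() decision flow, summarizing the branches above:
    #
    #     heads requested?            -> pull (streaming cannot be partial)
    #     server offers 'stream'?     -> stream_in with plain revlogv1
    #     server offers 'streamreqs'
    #       and we support them all?  -> stream_in with those requirements
    #     otherwise                   -> fall back to pull
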
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True