localrepo: translate "push includes X changeset" only once...
Thomas Arendsen Hein
r17855:52608155 stable
@@ -1,2632 +1,2632 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

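    # Editor's note (illustrative, not part of the original file): divergent
    # bookmarks share the name before the '@' suffix, so if both 'stable' and
    # a divergent 'stable@default' exist, a call like
    #
    #   repo.bookmarkheads('stable')
    #
    # returns the nodes of both, while a plain lookup sees only 'stable'.
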
    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by command and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that the hiddenrevs will need invalidation when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly a new extinct changeset)

        hidden changesets cannot have non-hidden descendants
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

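    # Editor's note (illustrative, not part of the original file): revs()
    # returns integer revision numbers while set() yields changectx objects;
    # both escape their arguments through revset.formatspec, e.g.
    #
    #   repo.revs('ancestors(%d) and not merge()', 42)
    #   for ctx in repo.set('branch(%s)', 'default'):
    #       repo.ui.write('%d\n' % ctx.rev())
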
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

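    # Editor's note (illustrative, not part of the original file): adding a
    # global tag for the current working directory parent might look like
    #
    #   node = repo['.'].node()
    #   repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)
    #
    # which commits a new changeset updating .hgtags.
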
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded; we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

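    # Editor's walkthrough (illustrative, not part of the original file):
    # given history 1 (branch a) -> 2 (branch b) -> 3 (branch a), with
    # partial = {'a': [node1], 'b': [node2]} and ctxgen yielding only rev 3,
    # newheadrevs for branch 'a' is [3]; the pruning loop finds rev 1 among
    # the ancestors of rev 3 and drops it, leaving partial['a'] == [node3],
    # while partial['b'] still holds node2, which no newer head supersedes.
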
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

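    # Editor's note (illustrative, not part of the original file): known()
    # answers discovery queries, and secret changesets are reported as
    # unknown so they never become common heads during exchange; with
    # hypothetical nodes, e.g.
    #
    #   repo.known([secretnode, draftnode, missingnode])
    #   # -> [False, True, False]
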
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records; the dirstate cannot do it, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

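    # Editor's note (illustrative, not part of the original file): the filter
    # machinery above is driven by hgrc sections such as (hypothetical filter
    # commands)
    #
    #   [encode]
    #   **.txt = dos2unix
    #
    #   [decode]
    #   **.txt = unix2dos
    #
    # wread() runs data through the matching [encode] filter on its way into
    # the repository, and wwrite() through the matching [decode] filter on
    # its way back out to the working directory.
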
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

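    # Minimal usage sketch for transaction(): callers take the store lock
    # first, close() the transaction on success and release() it
    # unconditionally (this mirrors the pattern commitctx() uses below;
    # the description string is illustrative):
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('example')
    #       try:
    #           ...                # write to the store through tr
    #           tr.close()
    #       finally:
    #           tr.release()
    #   finally:
    #       lock.release()
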
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

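    # How these two entry points divide the work (a sketch, not extra API):
    # recover() replays .hg/store/journal after an *interrupted* transaction,
    # while rollback() undoes the last *completed* one from the undo files.
    # A cautious caller might preview first:
    #
    #   if repo.rollback(dryrun=True) == 0:   # only prints what would happen
    #       repo.rollback(dryrun=False)
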
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

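    # Sketch of how _afterlock() defers work until the store lock goes away;
    # the callback below is hypothetical:
    #
    #   def notify():
    #       repo.ui.status('lock released\n')
    #   repo._afterlock(notify)   # runs immediately if no lock is held
    #
    # This is exactly how commit() schedules its "commit" hook below.
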
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

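    # lock() is re-entrant: a second call while the lock is held returns the
    # same lock object with its hold count bumped, so nested callers keep the
    # usual try/finally shape (illustrative):
    #
    #   l1 = repo.lock()
    #   try:
    #       l2 = repo.lock()   # same underlying lock, count incremented
    #       try:
    #           ...
    #       finally:
    #           l2.release()
    #   finally:
    #       l1.release()
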
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

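    # Sketch of the intended wlock() pattern for dirstate mutation; the file
    # name is hypothetical, and unlock() above writes the dirstate back on
    # release:
    #
    #   wlock = repo.wlock()
    #   try:
    #       repo.dirstate.drop('some-file')
    #   finally:
    #       wlock.release()
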
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

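    # Minimal sketch of driving commit() programmatically; the message, user
    # and file name are illustrative:
    #
    #   m = matchmod.match(repo.root, '', ['foo.txt'])
    #   node = repo.commit(text='example message', user='someone', match=m)
    #   if node is None:
    #       ...   # nothing changed, no revision was created
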
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

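    # walk() just delegates to the context: repo[node].walk(match). A sketch
    # that lists every tracked file in the working directory (node=None):
    #
    #   m = matchmod.always(repo.root, '')
    #   for f in repo.walk(m):
    #       repo.ui.write('%s\n' % f)
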
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

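    # status() returns seven sorted lists in a fixed order, so callers
    # usually unpack positionally (sketch):
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(unknown=True, ignored=True)
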
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

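    # Worked example for between(): with i counting steps away from top and
    # f doubling on every hit, the nodes kept are the ones 1, 2, 4, 8, ...
    # parents away. For a linear chain top -> a -> b -> c -> d -> bottom,
    # the list collected for that pair would be [a, b, d]: steps 1 and 2
    # match f, step 3 does not, and step 4 matches the doubled f again.
    # This exponential sampling keeps the legacy discovery exchange
    # logarithmic in the length of each chain.
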
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible,
                # so sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset,
                # so sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

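    # A minimal usage sketch for pull() (illustrative only; it assumes a
    # `repo` localrepository, a peer obtained via hg.peer(), and a
    # placeholder URL):
    #
    #     from mercurial import hg
    #     other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #     result = repo.pull(other)   # pull everything reachable
    #     # result is 0 when no changes were found, otherwise the value
    #     # returned by addchangegroup() (documented further below)
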
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

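    # Hedged sketch of using the hook point above: an extension could
    # subclass the repo in reposetup() to veto pushes. The class name and
    # policy are hypothetical; reposetup() and util.Abort are existing
    # conventions of this codebase.
    #
    #     def reposetup(ui, repo):
    #         class vetorepo(repo.__class__):
    #             def checkpush(self, force, revs):
    #                 if revs is None and not force:
    #                     raise util.Abort('push named revisions explicitly')
    #         repo.__class__ = vetorepo
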
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes the local user can lock the remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes the local user cannot lock the remote repo
        # (new ssh servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty --> no obsolete markers,
                        # so we can skip the iteration
                        if self.obsstore:
                            # these messages are defined here to stay within
                            # the 80-char limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are going to push and there is at least
                            # one obsolete or unstable changeset in missing,
                            # then at least one of the missing heads will be
                            # obsolete or unstable. So checking heads only
                            # is enough.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # Push of everything failed. Synchronize on all common
                    # changesets.
                    cheads = outgoing.commonheads
                else:
                    # We want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

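    # Interpreting push()'s return value per the docstring above; a
    # non-authoritative sketch assuming `repo`, `other` and `ui` exist:
    #
    #     ret = repo.push(other, force=False, revs=None)
    #     if ret is None:
    #         ui.status('nothing to push\n')
    #     elif ret == 0:
    #         ui.warn('push failed (HTTP error)\n')
    #     else:
    #         ui.status('push succeeded\n')
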
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

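    # Illustrative call only, assuming `base` and `head` are changelog
    # node ids already known to `repo`:
    #
    #     cg = repo.changegroupsubset([base], [head], 'pull')
    #     chunk = cg.read(4096)   # successive raw changegroup bytes
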
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

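    # Sketch of feeding a precomputed discovery.outgoing into the method
    # above (assumes `repo` and a `remote` peer):
    #
    #     out = discovery.findcommonoutgoing(repo, remote)
    #     cg = repo.getlocalbundle('push', out)   # None if nothing missing
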
    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

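    # Minimal use of getbundle(), hedged: with both defaults this bundles
    # every changeset in the repository (source is only a label used for
    # hooks and progress output):
    #
    #     cg = repo.getbundle('bundle')   # heads=local heads, common=[nullid]
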
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

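    # Decoding addchangegroup()'s return value, per the docstring above
    # (reference only):
    #
    #     ret == 0   -> nothing changed or no source
    #     ret == 1   -> head count unchanged
    #     ret >  1   -> ret - 1 new heads appeared
    #     ret <  0   -> -ret - 1 heads were removed
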
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation.
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

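    # The stream_in() wire format, as parsed above: one status line
    # ('0' ok, '1' operation forbidden, '2' remote lock failed), one line
    # with "<total_files> <total_bytes>", then for each file a header line
    # "<name>\0<size>" followed by exactly <size> bytes of raw revlog data.
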
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

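    # Typical clone() call (sketch; assumes `repo` is a freshly created
    # empty localrepository and `other` a peer):
    #
    #     repo.clone(other, stream=True)   # streams if the server allows,
    #                                      # otherwise falls back to pull
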
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

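    # Example pushkey() use, mirroring the bookmark exchange in push();
    # `node` stands in for a 40-char hex changeset id:
    #
    #     ok = repo.pushkey('bookmarks', 'mybook', '', node)
    #     # old value '' means "create"; ok is falsy if the update failed
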
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

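    # savecommitmessage() writes to .hg/last-message.txt via self.opener;
    # a hedged example:
    #
    #     relpath = repo.savecommitmessage('WIP: draft message')
    #     # relpath is a printable path to the saved file
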
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

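# aftertrans() is handed to the transaction machinery as a post-close
# callback factory; a sketch of the intended use (the rename pair shown
# follows the journal/undo convention used elsewhere in this file):
#
#     post = aftertrans([('journal', 'undo')])
#     post()   # renames 'journal' to 'undo', ignoring a missing journal
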
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

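# undoname() maps a journal file to its undo counterpart, e.g.:
#
#     undoname('.hg/store/journal')            -> '.hg/store/undo'
#     undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
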
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True