clfilter: introduce an "unfiltered" method on localrepo...
Pierre-Yves David
r17993:1a6f8820 default
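
The change adds an unfiltered() method to localrepository that simply returns self; filtered views of a repository are expected to override it so that callers holding either object can reach the real, unfiltered repository. A minimal sketch of the intended pattern (the repoview class and allrevs helper below are hypothetical illustrations, not part of this changeset):

    # Hypothetical sketch: a filtered view overrides unfiltered() to
    # expose the underlying repository, so code that must see every
    # changeset can call repo.unfiltered() on either kind of object.
    class repoview(object):
        def __init__(self, repo):
            self._unfilteredrepo = repo  # the real localrepository

        def unfiltered(self):
            return self._unfilteredrepo

    def allrevs(repo):
        # works whether repo is a filtered view or a plain repo
        return list(repo.unfiltered())
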
@@ -1,2629 +1,2635 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import peer, changegroup, subrepo, discovery, pushkey, obsolete
 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
 import lock, transaction, store, encoding, base85
 import scmutil, util, extensions, hook, error, revset
 import match as matchmod
 import merge as mergemod
 import tags as tagsmod
 from lock import release
 import weakref, errno, os, time, inspect
 propertycache = util.propertycache
 filecache = scmutil.filecache

 class storecache(filecache):
     """filecache for files in the store"""
     def join(self, obj, fname):
         return obj.sjoin(fname)

 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

 class localpeer(peer.peerrepository):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=MODERNCAPS):
         peer.peerrepository.__init__(self)
         self._repo = repo
         self.ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)
         self.requirements = repo.requirements
         self.supportedformats = repo.supportedformats

     def close(self):
         self._repo.close()

     def _capabilities(self):
         return self._caps

     def local(self):
         return self._repo

     def canpush(self):
         return True

     def url(self):
         return self._repo.url()

     def lookup(self, key):
         return self._repo.lookup(key)

     def branchmap(self):
         return discovery.visiblebranchmap(self._repo)

     def heads(self):
         return discovery.visibleheads(self._repo)

     def known(self, nodes):
         return self._repo.known(nodes)

     def getbundle(self, source, heads=None, common=None):
         return self._repo.getbundle(source, heads=heads, common=common)

     # TODO We might want to move the next two calls into legacypeer and add
     # unbundle instead.

     def lock(self):
         return self._repo.lock()

     def addchangegroup(self, cg, source, url):
         return self._repo.addchangegroup(cg, source, url)

     def pushkey(self, namespace, key, old, new):
         return self._repo.pushkey(namespace, key, old, new)

     def listkeys(self, namespace):
         return self._repo.listkeys(namespace)

     def debugwireargs(self, one, two, three=None, four=None, five=None):
         '''used to test argument passing over the wire'''
         return "%s %s %s %s %s" % (one, two, three, four, five)

 class locallegacypeer(localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
         localpeer.__init__(self, repo, caps=LEGACYCAPS)

     def branches(self, nodes):
         return self._repo.branches(nodes)

     def between(self, pairs):
         return self._repo.between(pairs)

     def changegroup(self, basenodes, source):
         return self._repo.changegroup(basenodes, source)

     def changegroupsubset(self, bases, heads, source):
         return self._repo.changegroupsubset(bases, heads, source)

 class localrepository(object):

     supportedformats = set(('revlogv1', 'generaldelta'))
     supported = supportedformats | set(('store', 'fncache', 'shared',
                                         'dotencode'))
     openerreqs = set(('revlogv1', 'generaldelta'))
     requirements = ['revlogv1']

     def _baserequirements(self, create):
         return self.requirements[:]

     def __init__(self, baseui, path=None, create=False):
         self.wvfs = scmutil.vfs(path, expand=True)
         self.wopener = self.wvfs
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         self.auditor = scmutil.pathauditor(self.root, self._checknested)
         self.vfs = scmutil.vfs(self.path)
         self.opener = self.vfs
         self.baseui = baseui
         self.ui = baseui.copy()
         # A list of callbacks to shape the phase if no data were found.
         # Callbacks are in the form: func(repo, roots) --> processed root.
         # This list is to be filled by extensions during repo setup.
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if not self.vfs.isdir():
             if create:
                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)
                 requirements = self._baserequirements(create)
                 if self.ui.configbool('format', 'usestore', True):
                     self.vfs.mkdir("store")
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                         if self.ui.configbool('format', 'dotencode', True):
                             requirements.append('dotencode')
                     # create an invalid changelog
                     self.vfs.append(
                         "00changelog.i",
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 if self.ui.configbool('format', 'generaldelta', False):
                     requirements.append("generaldelta")
                 requirements = set(requirements)
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 requirements = scmutil.readrequires(self.vfs, self.supported)
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
                 requirements = set()

         self.sharedpath = self.path
         try:
             s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
             if not os.path.exists(s):
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError, inst:
             if inst.errno != errno.ENOENT:
                 raise

         self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sopener = self.svfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self._applyrequirements(requirements)
         if create:
             self._writerequirements()


         self._branchcache = None
         self._branchcachetip = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

         # A cache for various files under .hg/ that tracks file changes
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}

     def close(self):
         pass

     def _restrictcapabilities(self, caps):
         return caps

     def _applyrequirements(self, requirements):
         self.requirements = requirements
         self.sopener.options = dict((r, 1) for r in requirements
                                     if r in self.openerreqs)

     def _writerequirements(self):
         reqfile = self.opener("requires", "w")
         for r in self.requirements:
             reqfile.write("%s\n" % r)
         reqfile.close()

     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)

         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False

     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle

+    def unfiltered(self):
+        """Return the unfiltered version of the repository
+
+        Intended to be overwritten by filtered repo."""
+        return self
+
266 @filecache('bookmarks')
272 @filecache('bookmarks')
267 def _bookmarks(self):
273 def _bookmarks(self):
268 return bookmarks.bmstore(self)
274 return bookmarks.bmstore(self)
269
275
270 @filecache('bookmarks.current')
276 @filecache('bookmarks.current')
271 def _bookmarkcurrent(self):
277 def _bookmarkcurrent(self):
272 return bookmarks.readcurrent(self)
278 return bookmarks.readcurrent(self)
273
279
274 def bookmarkheads(self, bookmark):
280 def bookmarkheads(self, bookmark):
275 name = bookmark.split('@', 1)[0]
281 name = bookmark.split('@', 1)[0]
276 heads = []
282 heads = []
277 for mark, n in self._bookmarks.iteritems():
283 for mark, n in self._bookmarks.iteritems():
278 if mark.split('@', 1)[0] == name:
284 if mark.split('@', 1)[0] == name:
279 heads.append(n)
285 heads.append(n)
280 return heads
286 return heads
281
287
282 @storecache('phaseroots')
288 @storecache('phaseroots')
283 def _phasecache(self):
289 def _phasecache(self):
284 return phases.phasecache(self, self._phasedefaults)
290 return phases.phasecache(self, self._phasedefaults)
285
291
286 @storecache('obsstore')
292 @storecache('obsstore')
287 def obsstore(self):
293 def obsstore(self):
288 store = obsolete.obsstore(self.sopener)
294 store = obsolete.obsstore(self.sopener)
289 if store and not obsolete._enabled:
295 if store and not obsolete._enabled:
290 # message is rare enough to not be translated
296 # message is rare enough to not be translated
291 msg = 'obsolete feature not enabled but %i markers found!\n'
297 msg = 'obsolete feature not enabled but %i markers found!\n'
292 self.ui.warn(msg % len(list(store)))
298 self.ui.warn(msg % len(list(store)))
293 return store
299 return store
294
300
295 @propertycache
301 @propertycache
296 def hiddenrevs(self):
302 def hiddenrevs(self):
297 """hiddenrevs: revs that should be hidden by command and tools
303 """hiddenrevs: revs that should be hidden by command and tools
298
304
299 This set is carried on the repo to ease initialization and lazy
305 This set is carried on the repo to ease initialization and lazy
300 loading; it'll probably move back to changelog for efficiency and
306 loading; it'll probably move back to changelog for efficiency and
301 consistency reasons.
307 consistency reasons.
302
308
303 Note that the hiddenrevs will needs invalidations when
309 Note that the hiddenrevs will needs invalidations when
304 - a new changesets is added (possible unstable above extinct)
310 - a new changesets is added (possible unstable above extinct)
305 - a new obsolete marker is added (possible new extinct changeset)
311 - a new obsolete marker is added (possible new extinct changeset)
306
312
307 hidden changesets cannot have non-hidden descendants
313 hidden changesets cannot have non-hidden descendants
308 """
314 """
309 hidden = set()
315 hidden = set()
310 if self.obsstore:
316 if self.obsstore:
311 ### hide extinct changeset that are not accessible by any mean
317 ### hide extinct changeset that are not accessible by any mean
312 hiddenquery = 'extinct() - ::(. + bookmark())'
318 hiddenquery = 'extinct() - ::(. + bookmark())'
313 hidden.update(self.revs(hiddenquery))
319 hidden.update(self.revs(hiddenquery))
314 return hidden
320 return hidden
315
321
316 @storecache('00changelog.i')
322 @storecache('00changelog.i')
317 def changelog(self):
323 def changelog(self):
318 c = changelog.changelog(self.sopener)
324 c = changelog.changelog(self.sopener)
319 if 'HG_PENDING' in os.environ:
325 if 'HG_PENDING' in os.environ:
320 p = os.environ['HG_PENDING']
326 p = os.environ['HG_PENDING']
321 if p.startswith(self.root):
327 if p.startswith(self.root):
322 c.readpending('00changelog.i.a')
328 c.readpending('00changelog.i.a')
323 return c
329 return c
324
330
325 @storecache('00manifest.i')
331 @storecache('00manifest.i')
326 def manifest(self):
332 def manifest(self):
327 return manifest.manifest(self.sopener)
333 return manifest.manifest(self.sopener)
328
334
329 @filecache('dirstate')
335 @filecache('dirstate')
330 def dirstate(self):
336 def dirstate(self):
331 warned = [0]
337 warned = [0]
332 def validate(node):
338 def validate(node):
333 try:
339 try:
334 self.changelog.rev(node)
340 self.changelog.rev(node)
335 return node
341 return node
336 except error.LookupError:
342 except error.LookupError:
337 if not warned[0]:
343 if not warned[0]:
338 warned[0] = True
344 warned[0] = True
339 self.ui.warn(_("warning: ignoring unknown"
345 self.ui.warn(_("warning: ignoring unknown"
340 " working parent %s!\n") % short(node))
346 " working parent %s!\n") % short(node))
341 return nullid
347 return nullid
342
348
343 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
349 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
344
350
345 def __getitem__(self, changeid):
351 def __getitem__(self, changeid):
346 if changeid is None:
352 if changeid is None:
347 return context.workingctx(self)
353 return context.workingctx(self)
348 return context.changectx(self, changeid)
354 return context.changectx(self, changeid)
349
355
350 def __contains__(self, changeid):
356 def __contains__(self, changeid):
351 try:
357 try:
352 return bool(self.lookup(changeid))
358 return bool(self.lookup(changeid))
353 except error.RepoLookupError:
359 except error.RepoLookupError:
354 return False
360 return False
355
361
356 def __nonzero__(self):
362 def __nonzero__(self):
357 return True
363 return True
358
364
359 def __len__(self):
365 def __len__(self):
360 return len(self.changelog)
366 return len(self.changelog)
361
367
362 def __iter__(self):
368 def __iter__(self):
363 return iter(self.changelog)
369 return iter(self.changelog)
364
370
365 def revs(self, expr, *args):
371 def revs(self, expr, *args):
366 '''Return a list of revisions matching the given revset'''
372 '''Return a list of revisions matching the given revset'''
367 expr = revset.formatspec(expr, *args)
373 expr = revset.formatspec(expr, *args)
368 m = revset.match(None, expr)
374 m = revset.match(None, expr)
369 return [r for r in m(self, list(self))]
375 return [r for r in m(self, list(self))]
370
376
371 def set(self, expr, *args):
377 def set(self, expr, *args):
372 '''
378 '''
373 Yield a context for each matching revision, after doing arg
379 Yield a context for each matching revision, after doing arg
374 replacement via revset.formatspec
380 replacement via revset.formatspec
375 '''
381 '''
376 for r in self.revs(expr, *args):
382 for r in self.revs(expr, *args):
377 yield self[r]
383 yield self[r]
378
384
379 def url(self):
385 def url(self):
380 return 'file:' + self.root
386 return 'file:' + self.root
381
387
382 def hook(self, name, throw=False, **args):
388 def hook(self, name, throw=False, **args):
383 return hook.hook(self.ui, self, name, throw, **args)
389 return hook.hook(self.ui, self, name, throw, **args)
384
390
385 def _tag(self, names, node, message, local, user, date, extra={}):
391 def _tag(self, names, node, message, local, user, date, extra={}):
386 if isinstance(names, str):
392 if isinstance(names, str):
387 names = (names,)
393 names = (names,)
388
394
389 branches = self.branchmap()
395 branches = self.branchmap()
390 for name in names:
396 for name in names:
391 self.hook('pretag', throw=True, node=hex(node), tag=name,
397 self.hook('pretag', throw=True, node=hex(node), tag=name,
392 local=local)
398 local=local)
393 if name in branches:
399 if name in branches:
394 self.ui.warn(_("warning: tag %s conflicts with existing"
400 self.ui.warn(_("warning: tag %s conflicts with existing"
395 " branch name\n") % name)
401 " branch name\n") % name)
396
402
397 def writetags(fp, names, munge, prevtags):
403 def writetags(fp, names, munge, prevtags):
398 fp.seek(0, 2)
404 fp.seek(0, 2)
399 if prevtags and prevtags[-1] != '\n':
405 if prevtags and prevtags[-1] != '\n':
400 fp.write('\n')
406 fp.write('\n')
401 for name in names:
407 for name in names:
402 m = munge and munge(name) or name
408 m = munge and munge(name) or name
403 if (self._tagscache.tagtypes and
409 if (self._tagscache.tagtypes and
404 name in self._tagscache.tagtypes):
410 name in self._tagscache.tagtypes):
405 old = self.tags().get(name, nullid)
411 old = self.tags().get(name, nullid)
406 fp.write('%s %s\n' % (hex(old), m))
412 fp.write('%s %s\n' % (hex(old), m))
407 fp.write('%s %s\n' % (hex(node), m))
413 fp.write('%s %s\n' % (hex(node), m))
408 fp.close()
414 fp.close()
409
415
410 prevtags = ''
416 prevtags = ''
411 if local:
417 if local:
412 try:
418 try:
413 fp = self.opener('localtags', 'r+')
419 fp = self.opener('localtags', 'r+')
414 except IOError:
420 except IOError:
415 fp = self.opener('localtags', 'a')
421 fp = self.opener('localtags', 'a')
416 else:
422 else:
417 prevtags = fp.read()
423 prevtags = fp.read()
418
424
419 # local tags are stored in the current charset
425 # local tags are stored in the current charset
420 writetags(fp, names, None, prevtags)
426 writetags(fp, names, None, prevtags)
421 for name in names:
427 for name in names:
422 self.hook('tag', node=hex(node), tag=name, local=local)
428 self.hook('tag', node=hex(node), tag=name, local=local)
423 return
429 return
424
430
425 try:
431 try:
426 fp = self.wfile('.hgtags', 'rb+')
432 fp = self.wfile('.hgtags', 'rb+')
427 except IOError, e:
433 except IOError, e:
428 if e.errno != errno.ENOENT:
434 if e.errno != errno.ENOENT:
429 raise
435 raise
430 fp = self.wfile('.hgtags', 'ab')
436 fp = self.wfile('.hgtags', 'ab')
431 else:
437 else:
432 prevtags = fp.read()
438 prevtags = fp.read()
433
439
434 # committed tags are stored in UTF-8
440 # committed tags are stored in UTF-8
435 writetags(fp, names, encoding.fromlocal, prevtags)
441 writetags(fp, names, encoding.fromlocal, prevtags)
436
442
437 fp.close()
443 fp.close()
438
444
439 self.invalidatecaches()
445 self.invalidatecaches()
440
446
441 if '.hgtags' not in self.dirstate:
447 if '.hgtags' not in self.dirstate:
442 self[None].add(['.hgtags'])
448 self[None].add(['.hgtags'])
443
449
444 m = matchmod.exact(self.root, '', ['.hgtags'])
450 m = matchmod.exact(self.root, '', ['.hgtags'])
445 tagnode = self.commit(message, user, date, extra=extra, match=m)
451 tagnode = self.commit(message, user, date, extra=extra, match=m)
446
452
447 for name in names:
453 for name in names:
448 self.hook('tag', node=hex(node), tag=name, local=local)
454 self.hook('tag', node=hex(node), tag=name, local=local)
449
455
450 return tagnode
456 return tagnode
451
457
452 def tag(self, names, node, message, local, user, date):
458 def tag(self, names, node, message, local, user, date):
453 '''tag a revision with one or more symbolic names.
459 '''tag a revision with one or more symbolic names.
454
460
455 names is a list of strings or, when adding a single tag, names may be a
461 names is a list of strings or, when adding a single tag, names may be a
456 string.
462 string.
457
463
458 if local is True, the tags are stored in a per-repository file.
464 if local is True, the tags are stored in a per-repository file.
459 otherwise, they are stored in the .hgtags file, and a new
465 otherwise, they are stored in the .hgtags file, and a new
460 changeset is committed with the change.
466 changeset is committed with the change.
461
467
462 keyword arguments:
468 keyword arguments:
463
469
464 local: whether to store tags in non-version-controlled file
470 local: whether to store tags in non-version-controlled file
465 (default False)
471 (default False)
466
472
467 message: commit message to use if committing
473 message: commit message to use if committing
468
474
469 user: name of user to use if committing
475 user: name of user to use if committing
470
476
471 date: date tuple to use if committing'''
477 date: date tuple to use if committing'''
472
478
473 if not local:
479 if not local:
474 for x in self.status()[:5]:
480 for x in self.status()[:5]:
475 if '.hgtags' in x:
481 if '.hgtags' in x:
476 raise util.Abort(_('working copy of .hgtags is changed '
482 raise util.Abort(_('working copy of .hgtags is changed '
477 '(please commit .hgtags manually)'))
483 '(please commit .hgtags manually)'))
478
484
479 self.tags() # instantiate the cache
485 self.tags() # instantiate the cache
480 self._tag(names, node, message, local, user, date)
486 self._tag(names, node, message, local, user, date)
481
487
482 @propertycache
488 @propertycache
483 def _tagscache(self):
489 def _tagscache(self):
484 '''Returns a tagscache object that contains various tags related
490 '''Returns a tagscache object that contains various tags related
485 caches.'''
491 caches.'''
486
492
487 # This simplifies its cache management by having one decorated
493 # This simplifies its cache management by having one decorated
488 # function (this one) and the rest simply fetch things from it.
494 # function (this one) and the rest simply fetch things from it.
489 class tagscache(object):
495 class tagscache(object):
490 def __init__(self):
496 def __init__(self):
491 # These two define the set of tags for this repository. tags
497 # These two define the set of tags for this repository. tags
492 # maps tag name to node; tagtypes maps tag name to 'global' or
498 # maps tag name to node; tagtypes maps tag name to 'global' or
493 # 'local'. (Global tags are defined by .hgtags across all
499 # 'local'. (Global tags are defined by .hgtags across all
494 # heads, and local tags are defined in .hg/localtags.)
500 # heads, and local tags are defined in .hg/localtags.)
495 # They constitute the in-memory cache of tags.
501 # They constitute the in-memory cache of tags.
496 self.tags = self.tagtypes = None
502 self.tags = self.tagtypes = None
497
503
498 self.nodetagscache = self.tagslist = None
504 self.nodetagscache = self.tagslist = None
499
505
500 cache = tagscache()
506 cache = tagscache()
501 cache.tags, cache.tagtypes = self._findtags()
507 cache.tags, cache.tagtypes = self._findtags()
502
508
503 return cache
509 return cache
504
510
505 def tags(self):
511 def tags(self):
506 '''return a mapping of tag to node'''
512 '''return a mapping of tag to node'''
507 t = {}
513 t = {}
508 if self.changelog.filteredrevs:
514 if self.changelog.filteredrevs:
509 tags, tt = self._findtags()
515 tags, tt = self._findtags()
510 else:
516 else:
511 tags = self._tagscache.tags
517 tags = self._tagscache.tags
512 for k, v in tags.iteritems():
518 for k, v in tags.iteritems():
513 try:
519 try:
514 # ignore tags to unknown nodes
520 # ignore tags to unknown nodes
515 self.changelog.rev(v)
521 self.changelog.rev(v)
516 t[k] = v
522 t[k] = v
517 except (error.LookupError, ValueError):
523 except (error.LookupError, ValueError):
518 pass
524 pass
519 return t
525 return t
520
526
521 def _findtags(self):
527 def _findtags(self):
522 '''Do the hard work of finding tags. Return a pair of dicts
528 '''Do the hard work of finding tags. Return a pair of dicts
523 (tags, tagtypes) where tags maps tag name to node, and tagtypes
529 (tags, tagtypes) where tags maps tag name to node, and tagtypes
524 maps tag name to a string like \'global\' or \'local\'.
530 maps tag name to a string like \'global\' or \'local\'.
525 Subclasses or extensions are free to add their own tags, but
531 Subclasses or extensions are free to add their own tags, but
526 should be aware that the returned dicts will be retained for the
532 should be aware that the returned dicts will be retained for the
527 duration of the localrepo object.'''
533 duration of the localrepo object.'''
528
534
529 # XXX what tagtype should subclasses/extensions use? Currently
535 # XXX what tagtype should subclasses/extensions use? Currently
530 # mq and bookmarks add tags, but do not set the tagtype at all.
536 # mq and bookmarks add tags, but do not set the tagtype at all.
531 # Should each extension invent its own tag type? Should there
537 # Should each extension invent its own tag type? Should there
532 # be one tagtype for all such "virtual" tags? Or is the status
538 # be one tagtype for all such "virtual" tags? Or is the status
533 # quo fine?
539 # quo fine?
534
540
535 alltags = {} # map tag name to (node, hist)
541 alltags = {} # map tag name to (node, hist)
536 tagtypes = {}
542 tagtypes = {}
537
543
538 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
544 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
539 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
545 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
540
546
541 # Build the return dicts. Have to re-encode tag names because
547 # Build the return dicts. Have to re-encode tag names because
542 # the tags module always uses UTF-8 (in order not to lose info
548 # the tags module always uses UTF-8 (in order not to lose info
543 # writing to the cache), but the rest of Mercurial wants them in
549 # writing to the cache), but the rest of Mercurial wants them in
544 # local encoding.
550 # local encoding.
545 tags = {}
551 tags = {}
546 for (name, (node, hist)) in alltags.iteritems():
552 for (name, (node, hist)) in alltags.iteritems():
547 if node != nullid:
553 if node != nullid:
548 tags[encoding.tolocal(name)] = node
554 tags[encoding.tolocal(name)] = node
549 tags['tip'] = self.changelog.tip()
555 tags['tip'] = self.changelog.tip()
550 tagtypes = dict([(encoding.tolocal(name), value)
556 tagtypes = dict([(encoding.tolocal(name), value)
551 for (name, value) in tagtypes.iteritems()])
557 for (name, value) in tagtypes.iteritems()])
552 return (tags, tagtypes)
558 return (tags, tagtypes)
553
559
554 def tagtype(self, tagname):
560 def tagtype(self, tagname):
555 '''
561 '''
556 return the type of the given tag. result can be:
562 return the type of the given tag. result can be:
557
563
558 'local' : a local tag
564 'local' : a local tag
559 'global' : a global tag
565 'global' : a global tag
560 None : tag does not exist
566 None : tag does not exist
561 '''
567 '''
562
568
563 return self._tagscache.tagtypes.get(tagname)
569 return self._tagscache.tagtypes.get(tagname)
564
570
565 def tagslist(self):
571 def tagslist(self):
566 '''return a list of tags ordered by revision'''
572 '''return a list of tags ordered by revision'''
567 if not self._tagscache.tagslist:
573 if not self._tagscache.tagslist:
568 l = []
574 l = []
569 for t, n in self.tags().iteritems():
575 for t, n in self.tags().iteritems():
570 r = self.changelog.rev(n)
576 r = self.changelog.rev(n)
571 l.append((r, t, n))
577 l.append((r, t, n))
572 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
578 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
573
579
574 return self._tagscache.tagslist
580 return self._tagscache.tagslist
575
581
576 def nodetags(self, node):
582 def nodetags(self, node):
577 '''return the tags associated with a node'''
583 '''return the tags associated with a node'''
578 if not self._tagscache.nodetagscache:
584 if not self._tagscache.nodetagscache:
579 nodetagscache = {}
585 nodetagscache = {}
580 for t, n in self._tagscache.tags.iteritems():
586 for t, n in self._tagscache.tags.iteritems():
581 nodetagscache.setdefault(n, []).append(t)
587 nodetagscache.setdefault(n, []).append(t)
582 for tags in nodetagscache.itervalues():
588 for tags in nodetagscache.itervalues():
583 tags.sort()
589 tags.sort()
584 self._tagscache.nodetagscache = nodetagscache
590 self._tagscache.nodetagscache = nodetagscache
585 return self._tagscache.nodetagscache.get(node, [])
591 return self._tagscache.nodetagscache.get(node, [])
586
592
587 def nodebookmarks(self, node):
593 def nodebookmarks(self, node):
588 marks = []
594 marks = []
589 for bookmark, n in self._bookmarks.iteritems():
595 for bookmark, n in self._bookmarks.iteritems():
590 if n == node:
596 if n == node:
591 marks.append(bookmark)
597 marks.append(bookmark)
592 return sorted(marks)
598 return sorted(marks)
593
599
594 def _branchtags(self, partial, lrev):
600 def _branchtags(self, partial, lrev):
595 # TODO: rename this function?
601 # TODO: rename this function?
596 tiprev = len(self) - 1
602 tiprev = len(self) - 1
597 if lrev != tiprev:
603 if lrev != tiprev:
598 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
604 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
599 self._updatebranchcache(partial, ctxgen)
605 self._updatebranchcache(partial, ctxgen)
600 self._writebranchcache(partial, self.changelog.tip(), tiprev)
606 self._writebranchcache(partial, self.changelog.tip(), tiprev)
601
607
602 return partial
608 return partial
603
609
604 def updatebranchcache(self):
610 def updatebranchcache(self):
605 tip = self.changelog.tip()
611 tip = self.changelog.tip()
606 if self._branchcache is not None and self._branchcachetip == tip:
612 if self._branchcache is not None and self._branchcachetip == tip:
607 return
613 return
608
614
609 oldtip = self._branchcachetip
615 oldtip = self._branchcachetip
610 self._branchcachetip = tip
616 self._branchcachetip = tip
611 if oldtip is None or oldtip not in self.changelog.nodemap:
617 if oldtip is None or oldtip not in self.changelog.nodemap:
612 partial, last, lrev = self._readbranchcache()
618 partial, last, lrev = self._readbranchcache()
613 else:
619 else:
614 lrev = self.changelog.rev(oldtip)
620 lrev = self.changelog.rev(oldtip)
615 partial = self._branchcache
621 partial = self._branchcache
616
622
617 self._branchtags(partial, lrev)
623 self._branchtags(partial, lrev)
618 # this private cache holds all heads (not just the branch tips)
624 # this private cache holds all heads (not just the branch tips)
619 self._branchcache = partial
625 self._branchcache = partial
620
626
621 def branchmap(self):
627 def branchmap(self):
622 '''returns a dictionary {branch: [branchheads]}'''
628 '''returns a dictionary {branch: [branchheads]}'''
623 if self.changelog.filteredrevs:
629 if self.changelog.filteredrevs:
624 # some changeset are excluded we can't use the cache
630 # some changeset are excluded we can't use the cache
625 branchmap = {}
631 branchmap = {}
626 self._updatebranchcache(branchmap, (self[r] for r in self))
632 self._updatebranchcache(branchmap, (self[r] for r in self))
627 return branchmap
633 return branchmap
628 else:
634 else:
629 self.updatebranchcache()
635 self.updatebranchcache()
630 return self._branchcache
636 return self._branchcache
631
637
632
638
633 def _branchtip(self, heads):
639 def _branchtip(self, heads):
634 '''return the tipmost branch head in heads'''
640 '''return the tipmost branch head in heads'''
635 tip = heads[-1]
641 tip = heads[-1]
636 for h in reversed(heads):
642 for h in reversed(heads):
637 if not self[h].closesbranch():
643 if not self[h].closesbranch():
638 tip = h
644 tip = h
639 break
645 break
640 return tip
646 return tip
641
647
642 def branchtip(self, branch):
648 def branchtip(self, branch):
643 '''return the tip node for a given branch'''
649 '''return the tip node for a given branch'''
644 if branch not in self.branchmap():
650 if branch not in self.branchmap():
645 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
651 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
646 return self._branchtip(self.branchmap()[branch])
652 return self._branchtip(self.branchmap()[branch])
647
653
648 def branchtags(self):
654 def branchtags(self):
649 '''return a dict where branch names map to the tipmost head of
655 '''return a dict where branch names map to the tipmost head of
650 the branch, open heads come before closed'''
656 the branch, open heads come before closed'''
651 bt = {}
657 bt = {}
652 for bn, heads in self.branchmap().iteritems():
658 for bn, heads in self.branchmap().iteritems():
653 bt[bn] = self._branchtip(heads)
659 bt[bn] = self._branchtip(heads)
654 return bt
660 return bt
655
661
656 def _readbranchcache(self):
662 def _readbranchcache(self):
657 partial = {}
663 partial = {}
658 try:
664 try:
659 f = self.opener("cache/branchheads")
665 f = self.opener("cache/branchheads")
660 lines = f.read().split('\n')
666 lines = f.read().split('\n')
661 f.close()
667 f.close()
662 except (IOError, OSError):
668 except (IOError, OSError):
663 return {}, nullid, nullrev
669 return {}, nullid, nullrev
664
670
665 try:
671 try:
666 last, lrev = lines.pop(0).split(" ", 1)
672 last, lrev = lines.pop(0).split(" ", 1)
667 last, lrev = bin(last), int(lrev)
673 last, lrev = bin(last), int(lrev)
668 if lrev >= len(self) or self[lrev].node() != last:
674 if lrev >= len(self) or self[lrev].node() != last:
669 # invalidate the cache
675 # invalidate the cache
670 raise ValueError('invalidating branch cache (tip differs)')
676 raise ValueError('invalidating branch cache (tip differs)')
671 for l in lines:
677 for l in lines:
672 if not l:
678 if not l:
673 continue
679 continue
674 node, label = l.split(" ", 1)
680 node, label = l.split(" ", 1)
675 label = encoding.tolocal(label.strip())
681 label = encoding.tolocal(label.strip())
676 if not node in self:
682 if not node in self:
677 raise ValueError('invalidating branch cache because node '+
683 raise ValueError('invalidating branch cache because node '+
678 '%s does not exist' % node)
684 '%s does not exist' % node)
679 partial.setdefault(label, []).append(bin(node))
685 partial.setdefault(label, []).append(bin(node))
680 except KeyboardInterrupt:
686 except KeyboardInterrupt:
681 raise
687 raise
682 except Exception, inst:
688 except Exception, inst:
683 if self.ui.debugflag:
689 if self.ui.debugflag:
684 self.ui.warn(str(inst), '\n')
690 self.ui.warn(str(inst), '\n')
685 partial, last, lrev = {}, nullid, nullrev
691 partial, last, lrev = {}, nullid, nullrev
686 return partial, last, lrev
692 return partial, last, lrev
687
693
688 def _writebranchcache(self, branches, tip, tiprev):
694 def _writebranchcache(self, branches, tip, tiprev):
689 try:
695 try:
690 f = self.opener("cache/branchheads", "w", atomictemp=True)
696 f = self.opener("cache/branchheads", "w", atomictemp=True)
691 f.write("%s %s\n" % (hex(tip), tiprev))
697 f.write("%s %s\n" % (hex(tip), tiprev))
692 for label, nodes in branches.iteritems():
698 for label, nodes in branches.iteritems():
693 for node in nodes:
699 for node in nodes:
694 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
700 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
695 f.close()
701 f.close()
696 except (IOError, OSError):
702 except (IOError, OSError):
697 pass
703 pass
698
704
699 def _updatebranchcache(self, partial, ctxgen):
705 def _updatebranchcache(self, partial, ctxgen):
700 """Given a branchhead cache, partial, that may have extra nodes or be
706 """Given a branchhead cache, partial, that may have extra nodes or be
701 missing heads, and a generator of nodes that are at least a superset of
707 missing heads, and a generator of nodes that are at least a superset of
702 heads missing, this function updates partial to be correct.
708 heads missing, this function updates partial to be correct.
703 """
709 """
704 # collect new branch entries
710 # collect new branch entries
705 newbranches = {}
711 newbranches = {}
706 for c in ctxgen:
712 for c in ctxgen:
707 newbranches.setdefault(c.branch(), []).append(c.node())
713 newbranches.setdefault(c.branch(), []).append(c.node())
708 # if older branchheads are reachable from new ones, they aren't
714 # if older branchheads are reachable from new ones, they aren't
709 # really branchheads. Note checking parents is insufficient:
715 # really branchheads. Note checking parents is insufficient:
710 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
716 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
711 for branch, newnodes in newbranches.iteritems():
717 for branch, newnodes in newbranches.iteritems():
712 bheads = partial.setdefault(branch, [])
718 bheads = partial.setdefault(branch, [])
713 # Remove candidate heads that no longer are in the repo (e.g., as
719 # Remove candidate heads that no longer are in the repo (e.g., as
714 # the result of a strip that just happened). Avoid using 'node in
720 # the result of a strip that just happened). Avoid using 'node in
715 # self' here because that dives down into branchcache code somewhat
721 # self' here because that dives down into branchcache code somewhat
716 # recursively.
722 # recursively.
717 bheadrevs = [self.changelog.rev(node) for node in bheads
723 bheadrevs = [self.changelog.rev(node) for node in bheads
718 if self.changelog.hasnode(node)]
724 if self.changelog.hasnode(node)]
719 newheadrevs = [self.changelog.rev(node) for node in newnodes
725 newheadrevs = [self.changelog.rev(node) for node in newnodes
720 if self.changelog.hasnode(node)]
726 if self.changelog.hasnode(node)]
721 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
727 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
722 # Remove duplicates - nodes that are in newheadrevs and are already
728 # Remove duplicates - nodes that are in newheadrevs and are already
723 # in bheadrevs. This can happen if you strip a node whose parent
729 # in bheadrevs. This can happen if you strip a node whose parent
724 # was already a head (because they're on different branches).
730 # was already a head (because they're on different branches).
725 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
731 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
726
732
727 # Starting from tip means fewer passes over reachable. If we know
733 # Starting from tip means fewer passes over reachable. If we know
728 # the new candidates are not ancestors of existing heads, we don't
734 # the new candidates are not ancestors of existing heads, we don't
729 # have to examine ancestors of existing heads
735 # have to examine ancestors of existing heads
730 if ctxisnew:
736 if ctxisnew:
731 iterrevs = sorted(newheadrevs)
737 iterrevs = sorted(newheadrevs)
732 else:
738 else:
733 iterrevs = list(bheadrevs)
739 iterrevs = list(bheadrevs)
734
740
735 # This loop prunes out two kinds of heads - heads that are
741 # This loop prunes out two kinds of heads - heads that are
736 # superseded by a head in newheadrevs, and newheadrevs that are not
742 # superseded by a head in newheadrevs, and newheadrevs that are not
737 # heads because an existing head is their descendant.
743 # heads because an existing head is their descendant.
738 while iterrevs:
744 while iterrevs:
739 latest = iterrevs.pop()
745 latest = iterrevs.pop()
740 if latest not in bheadrevs:
746 if latest not in bheadrevs:
741 continue
747 continue
742 ancestors = set(self.changelog.ancestors([latest],
748 ancestors = set(self.changelog.ancestors([latest],
743 bheadrevs[0]))
749 bheadrevs[0]))
744 if ancestors:
750 if ancestors:
745 bheadrevs = [b for b in bheadrevs if b not in ancestors]
751 bheadrevs = [b for b in bheadrevs if b not in ancestors]
746 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
752 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
747
753
748 # There may be branches that cease to exist when the last commit in the
754 # There may be branches that cease to exist when the last commit in the
749 # branch was stripped. This code filters them out. Note that the
755 # branch was stripped. This code filters them out. Note that the
750 # branch that ceased to exist may not be in newbranches because
756 # branch that ceased to exist may not be in newbranches because
751 # newbranches is the set of candidate heads, which when you strip the
757 # newbranches is the set of candidate heads, which when you strip the
752 # last commit in a branch will be the parent branch.
758 # last commit in a branch will be the parent branch.
753 for branch in partial.keys():
759 for branch in partial.keys():
754 nodes = [head for head in partial[branch]
760 nodes = [head for head in partial[branch]
755 if self.changelog.hasnode(head)]
761 if self.changelog.hasnode(head)]
756 if not nodes:
762 if not nodes:
757 del partial[branch]
763 del partial[branch]
758
764
759 def lookup(self, key):
765 def lookup(self, key):
760 return self[key].node()
766 return self[key].node()
761
767
762 def lookupbranch(self, key, remote=None):
768 def lookupbranch(self, key, remote=None):
763 repo = remote or self
769 repo = remote or self
764 if key in repo.branchmap():
770 if key in repo.branchmap():
765 return key
771 return key
766
772
767 repo = (remote and remote.local()) and remote or self
773 repo = (remote and remote.local()) and remote or self
768 return repo[key].branch()
774 return repo[key].branch()
769
775
770 def known(self, nodes):
776 def known(self, nodes):
771 nm = self.changelog.nodemap
777 nm = self.changelog.nodemap
772 pc = self._phasecache
778 pc = self._phasecache
773 result = []
779 result = []
774 for n in nodes:
780 for n in nodes:
775 r = nm.get(n)
781 r = nm.get(n)
776 resp = not (r is None or pc.phase(self, r) >= phases.secret)
782 resp = not (r is None or pc.phase(self, r) >= phases.secret)
777 result.append(resp)
783 result.append(resp)
778 return result
784 return result
779
785
780 def local(self):
786 def local(self):
781 return self
787 return self
782
788
783 def cancopy(self):
789 def cancopy(self):
784 return self.local() # so statichttprepo's override of local() works
790 return self.local() # so statichttprepo's override of local() works
785
791
786 def join(self, f):
792 def join(self, f):
787 return os.path.join(self.path, f)
793 return os.path.join(self.path, f)
788
794
789 def wjoin(self, f):
795 def wjoin(self, f):
790 return os.path.join(self.root, f)
796 return os.path.join(self.root, f)
791
797
792 def file(self, f):
798 def file(self, f):
793 if f[0] == '/':
799 if f[0] == '/':
794 f = f[1:]
800 f = f[1:]
795 return filelog.filelog(self.sopener, f)
801 return filelog.filelog(self.sopener, f)
796
802
797 def changectx(self, changeid):
803 def changectx(self, changeid):
798 return self[changeid]
804 return self[changeid]
799
805
800 def parents(self, changeid=None):
806 def parents(self, changeid=None):
801 '''get list of changectxs for parents of changeid'''
807 '''get list of changectxs for parents of changeid'''
802 return self[changeid].parents()
808 return self[changeid].parents()
803
809
804 def setparents(self, p1, p2=nullid):
810 def setparents(self, p1, p2=nullid):
805 copies = self.dirstate.setparents(p1, p2)
811 copies = self.dirstate.setparents(p1, p2)
806 if copies:
812 if copies:
807 # Adjust copy records, the dirstate cannot do it, it
813 # Adjust copy records, the dirstate cannot do it, it
808 # requires access to parents manifests. Preserve them
814 # requires access to parents manifests. Preserve them
809 # only for entries added to first parent.
815 # only for entries added to first parent.
810 pctx = self[p1]
816 pctx = self[p1]
811 for f in copies:
817 for f in copies:
812 if f not in pctx and copies[f] in pctx:
818 if f not in pctx and copies[f] in pctx:
813 self.dirstate.copy(copies[f], f)
819 self.dirstate.copy(copies[f], f)
814
820
815 def filectx(self, path, changeid=None, fileid=None):
821 def filectx(self, path, changeid=None, fileid=None):
816 """changeid can be a changeset revision, node, or tag.
822 """changeid can be a changeset revision, node, or tag.
817 fileid can be a file revision or node."""
823 fileid can be a file revision or node."""
818 return context.filectx(self, path, changeid, fileid)
824 return context.filectx(self, path, changeid, fileid)
819
825
820 def getcwd(self):
826 def getcwd(self):
821 return self.dirstate.getcwd()
827 return self.dirstate.getcwd()
822
828
823 def pathto(self, f, cwd=None):
829 def pathto(self, f, cwd=None):
824 return self.dirstate.pathto(f, cwd)
830 return self.dirstate.pathto(f, cwd)
825
831
826 def wfile(self, f, mode='r'):
832 def wfile(self, f, mode='r'):
827 return self.wopener(f, mode)
833 return self.wopener(f, mode)
828
834
829 def _link(self, f):
835 def _link(self, f):
830 return os.path.islink(self.wjoin(f))
836 return os.path.islink(self.wjoin(f))
831
837
832 def _loadfilter(self, filter):
838 def _loadfilter(self, filter):
833 if filter not in self.filterpats:
839 if filter not in self.filterpats:
834 l = []
840 l = []
835 for pat, cmd in self.ui.configitems(filter):
841 for pat, cmd in self.ui.configitems(filter):
836 if cmd == '!':
842 if cmd == '!':
837 continue
843 continue
838 mf = matchmod.match(self.root, '', [pat])
844 mf = matchmod.match(self.root, '', [pat])
839 fn = None
845 fn = None
840 params = cmd
846 params = cmd
841 for name, filterfn in self._datafilters.iteritems():
847 for name, filterfn in self._datafilters.iteritems():
842 if cmd.startswith(name):
848 if cmd.startswith(name):
843 fn = filterfn
849 fn = filterfn
844 params = cmd[len(name):].lstrip()
850 params = cmd[len(name):].lstrip()
845 break
851 break
846 if not fn:
852 if not fn:
847 fn = lambda s, c, **kwargs: util.filter(s, c)
853 fn = lambda s, c, **kwargs: util.filter(s, c)
848 # Wrap old filters not supporting keyword arguments
854 # Wrap old filters not supporting keyword arguments
849 if not inspect.getargspec(fn)[2]:
855 if not inspect.getargspec(fn)[2]:
850 oldfn = fn
856 oldfn = fn
851 fn = lambda s, c, **kwargs: oldfn(s, c)
857 fn = lambda s, c, **kwargs: oldfn(s, c)
852 l.append((mf, fn, params))
858 l.append((mf, fn, params))
853 self.filterpats[filter] = l
859 self.filterpats[filter] = l
854 return self.filterpats[filter]
860 return self.filterpats[filter]
855
861
856 def _filter(self, filterpats, filename, data):
862 def _filter(self, filterpats, filename, data):
857 for mf, fn, cmd in filterpats:
863 for mf, fn, cmd in filterpats:
858 if mf(filename):
864 if mf(filename):
859 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
865 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
860 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
866 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
861 break
867 break
862
868
863 return data
869 return data
864
870
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

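    # A sketch of the expected calling convention (the caller code is
    # illustrative, not part of this module):
    #
    #   tr = repo.transaction("my-operation")
    #   try:
    #       ...                 # write to the store
    #       tr.close()          # commit the journal
    #   finally:
    #       tr.release()        # rolls back if close() was never reached
    #
    # A nested call returns tr.nest(), so inner operations join the outer
    # transaction instead of writing a second journal.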
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

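    # Assumed relationship between the files above: while a transaction
    # runs, the journal.* copies shadow the live files; on a successful
    # close they are renamed via undoname() (see aftertrans in
    # transaction()), e.g. .hg/store/journal becomes .hg/store/undo.
    # recover() consumes the journal.* copies after an interrupted
    # transaction; rollback() below consumes the undo.* ones.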
    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback,
        # pass them to destroyed(), which will prevent the branchhead cache
        # from being invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

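    # Lock-ordering note: when both locks are needed, acquire wlock()
    # before lock() and release them in the opposite order, as rollback()
    # above does:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...
    #   finally:
    #       release(lock, wlock)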
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

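    # _filecommit() hands back the filelog node to store in the new
    # manifest: a freshly added revision whenever content, copy metadata
    # or a second parent is involved, otherwise the unchanged fparent1.
    # For example, a merge that only flips a file's executable bit still
    # records the file in 'changelist' but reuses fparent1 as its entry.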
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

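    # Illustrative call (file name and message are hypothetical):
    #
    #   m = matchmod.match(repo.root, '', ['src/module.py'])
    #   node = repo.commit(text="fix overflow", user="alice", match=m)
    #
    # A None result means nothing needed committing; otherwise 'node' is
    # the 20-byte node id of the new changeset.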
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

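    # The tuple returned by status() is, in order:
    #   (modified, added, removed, deleted, unknown, ignored, clean)
    # The last three lists stay empty unless the corresponding
    # unknown/ignored/clean arguments are true, so a caller checking for
    # pending changes might, for instance, test any(r[:4]).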
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

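    # between() samples the first-parent chain from 'top' down towards
    # 'bottom', keeping only the nodes at exponentially growing distances
    # (1, 2, 4, 8, ...), giving callers such as the older discovery wire
    # protocol a logarithmic-size summary of each range rather than every
    # node on the path.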
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing, or you break future
        # useful rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

1782 # Get remote phases data from remote
1788 # Get remote phases data from remote
1783 remotephases = remote.listkeys('phases')
1789 remotephases = remote.listkeys('phases')
1784 publishing = bool(remotephases.get('publishing', False))
1790 publishing = bool(remotephases.get('publishing', False))
1785 if remotephases and not publishing:
1791 if remotephases and not publishing:
1786 # remote is new and unpublishing
1792 # remote is new and unpublishing
1787 pheads, _dr = phases.analyzeremotephases(self, subset,
1793 pheads, _dr = phases.analyzeremotephases(self, subset,
1788 remotephases)
1794 remotephases)
1789 phases.advanceboundary(self, phases.public, pheads)
1795 phases.advanceboundary(self, phases.public, pheads)
1790 phases.advanceboundary(self, phases.draft, subset)
1796 phases.advanceboundary(self, phases.draft, subset)
1791 else:
1797 else:
1792 # Remote is old or publishing all common changesets
1798 # Remote is old or publishing all common changesets
1793 # should be seen as public
1799 # should be seen as public
1794 phases.advanceboundary(self, phases.public, subset)
1800 phases.advanceboundary(self, phases.public, subset)
1795
1801
1796 if obsolete._enabled:
1802 if obsolete._enabled:
1797 self.ui.debug('fetching remote obsolete markers\n')
1803 self.ui.debug('fetching remote obsolete markers\n')
1798 remoteobs = remote.listkeys('obsolete')
1804 remoteobs = remote.listkeys('obsolete')
1799 if 'dump0' in remoteobs:
1805 if 'dump0' in remoteobs:
1800 if tr is None:
1806 if tr is None:
1801 tr = self.transaction(trname)
1807 tr = self.transaction(trname)
1802 for key in sorted(remoteobs, reverse=True):
1808 for key in sorted(remoteobs, reverse=True):
1803 if key.startswith('dump'):
1809 if key.startswith('dump'):
1804 data = base85.b85decode(remoteobs[key])
1810 data = base85.b85decode(remoteobs[key])
1805 self.obsstore.mergemarkers(tr, data)
1811 self.obsstore.mergemarkers(tr, data)
1806 if tr is not None:
1812 if tr is not None:
1807 tr.close()
1813 tr.close()
1808 finally:
1814 finally:
1809 if tr is not None:
1815 if tr is not None:
1810 tr.release()
1816 tr.release()
1811 lock.release()
1817 lock.release()
1812
1818
1813 return result
1819 return result
1814
1820
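    # Illustrative sketch (editorial, hypothetical caller): pull() returns
    # 0 when nothing was fetched, otherwise addchangegroup()'s integer:
    #
    #   result = repo.pull(otherpeer)
    #   if result == 0:
    #       pass  # no changesets were added
    #   elif result > 1:
    #       pass  # result - 1 new heads appeared
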
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

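    # Illustrative sketch (not upstream code): an extension could tighten
    # this hook roughly as follows; the class name and message below are
    # hypothetical:
    #
    #   class strictrepo(localrepository):
    #       def checkpush(self, force, revs):
    #           super(strictrepo, self).checkpush(force, revs)
    #           if revs is None and not force:
    #               raise util.Abort(_('pushing all heads requires --force'))
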
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if self.obsstore:
                            # these messages are defined here to respect
                            # the 80-char line limit
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are to push and there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missing heads will be obsolete
                            # or unstable. So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(msb % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
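                    # Worked example (editorial, hypothetical DAG): with a
                    # common head C, missing == [X] where X is a child of C,
                    # and revs == [X]: X is not in common, so the list
                    # comprehension adds nothing, while the revset picks C
                    # (the parent of root X that is a common head), giving
                    # cheads == [C].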
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

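    # Illustrative sketch (editorial, hypothetical caller) of interpreting
    # push()'s return value, per the docstring above:
    #
    #   ret = repo.push(otherpeer)
    #   if ret is None:
    #       pass  # nothing to push
    #   elif ret == 0:
    #       pass  # HTTP-level error
    #   else:
    #       pass  # pushed; head-count change as per addchangegroup()
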
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

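    # Editorial sketch of a hypothetical caller: bundling everything
    # between a base and the current heads, then reading chunks:
    #
    #   cg = repo.changegroupsubset([basenode], repo.heads(), 'bundle')
    #   chunk = cg.read(4096)  # successive changegroup chunks
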
    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

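    # Editorial note: in revset terms the bundled set is roughly
    # `::heads - ::common`; e.g. with common == [nullid] and heads left
    # as None this degenerates to "every changeset in the repository".
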
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

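    # Worked example (editorial): if a pull adds two new heads, dh == 2
    # and addchangegroup() returns 3; if it merges two old heads into one,
    # dh == -1 and it returns -2; an unchanged head count returns 1, so 0
    # stays reserved for "nothing happened".
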
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

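    # Editorial sketch of the stream_out wire format as the parsing above
    # implies (values are made-up examples, not a real session):
    #
    #   0\n                    # status: 0 ok, 1 forbidden, 2 lock failed
    #   2 20480\n              # "<total_files> <total_bytes>"
    #   data/foo.i\x0012345\n  # "<name>\0<size>", then <size> raw bytes
    #   ...                    # repeated for each remaining file
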
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

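    # Editorial sketch: listkeys returns a plain {key: value} string dict
    # per namespace; e.g. repo.listkeys('bookmarks') might yield the
    # hypothetical {'stable': '1a6f8820...'}, mapping bookmark names to
    # hex changeset ids.
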
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

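# Editorial sketch: the closure returned above is meant to be handed to a
# transaction as its after-callback, roughly (paths hypothetical):
#
#   tr = transaction.transaction(ui.warn, opener, "journal",
#                                aftertrans([("journal", "undo")]))
#
# Binding only the plain (src, dest) tuples, rather than the repo object,
# keeps the callback free of references back to objects that reference the
# transaction, so destructors can run.
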
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

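# Editorial example: undoname('.hg/store/journal.phaseroots') returns
# '.hg/store/undo.phaseroots' -- only the first 'journal' is replaced.
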
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True