obsolete: fix typos in comments introduced by 6955d69a52a4
Thomas Arendsen Hein
r17306:7d2967de stable
@@ -1,2602 +1,2602 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wopener = scmutil.opener(path, expand=True)
        self.wvfs = self.wopener
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.vfs = self.opener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
-            # message is rare enough to not be stranlated
+            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
300 """hiddenrevs: revs that should be hidden by command and tools
300 """hiddenrevs: revs that should be hidden by command and tools
301
301
302 This set is carried on the repo to ease initialisation and lazy
302 This set is carried on the repo to ease initialisation and lazy
303 loading it'll probably move back to changelog for efficienty and
303 loading it'll probably move back to changelog for efficienty and
304 consistency reason
304 consistency reason
305
305
306 Note that the hiddenrevs will needs invalidations when
306 Note that the hiddenrevs will needs invalidations when
307 - a new changesets is added (possible unstable above extinct)
307 - a new changesets is added (possible unstable above extinct)
308 - a new obsolete marker is added (possible new extinct changeset)
308 - a new obsolete marker is added (possible new extinct changeset)
309 """
309 """
310 hidden = set()
310 hidden = set()
311 if self.obsstore:
311 if self.obsstore:
312 ### hide extinct changeset that are not accessible by any mean
312 ### hide extinct changeset that are not accessible by any mean
313 hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
313 hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
314 hidden.update(self.revs(hiddenquery))
314 hidden.update(self.revs(hiddenquery))
315 return hidden
315 return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
736 # heads because an existing head is their descendant.
736 # heads because an existing head is their descendant.
737 while iterrevs:
737 while iterrevs:
738 latest = iterrevs.pop()
738 latest = iterrevs.pop()
739 if latest not in bheadrevs:
739 if latest not in bheadrevs:
740 continue
740 continue
741 ancestors = set(self.changelog.ancestors([latest],
741 ancestors = set(self.changelog.ancestors([latest],
742 bheadrevs[0]))
742 bheadrevs[0]))
743 if ancestors:
743 if ancestors:
744 bheadrevs = [b for b in bheadrevs if b not in ancestors]
744 bheadrevs = [b for b in bheadrevs if b not in ancestors]
745 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
745 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
746
746
747 # There may be branches that cease to exist when the last commit in the
747 # There may be branches that cease to exist when the last commit in the
748 # branch was stripped. This code filters them out. Note that the
748 # branch was stripped. This code filters them out. Note that the
749 # branch that ceased to exist may not be in newbranches because
749 # branch that ceased to exist may not be in newbranches because
750 # newbranches is the set of candidate heads, which when you strip the
750 # newbranches is the set of candidate heads, which when you strip the
751 # last commit in a branch will be the parent branch.
751 # last commit in a branch will be the parent branch.
752 for branch in partial.keys():
752 for branch in partial.keys():
753 nodes = [head for head in partial[branch]
753 nodes = [head for head in partial[branch]
754 if self.changelog.hasnode(head)]
754 if self.changelog.hasnode(head)]
755 if not nodes:
755 if not nodes:
756 del partial[branch]
756 del partial[branch]
757
757
758 def lookup(self, key):
758 def lookup(self, key):
759 return self[key].node()
759 return self[key].node()
760
760
761 def lookupbranch(self, key, remote=None):
761 def lookupbranch(self, key, remote=None):
762 repo = remote or self
762 repo = remote or self
763 if key in repo.branchmap():
763 if key in repo.branchmap():
764 return key
764 return key
765
765
766 repo = (remote and remote.local()) and remote or self
766 repo = (remote and remote.local()) and remote or self
767 return repo[key].branch()
767 return repo[key].branch()
768
768
769 def known(self, nodes):
769 def known(self, nodes):
770 nm = self.changelog.nodemap
770 nm = self.changelog.nodemap
771 pc = self._phasecache
771 pc = self._phasecache
772 result = []
772 result = []
773 for n in nodes:
773 for n in nodes:
774 r = nm.get(n)
774 r = nm.get(n)
775 resp = not (r is None or pc.phase(self, r) >= phases.secret)
775 resp = not (r is None or pc.phase(self, r) >= phases.secret)
776 result.append(resp)
776 result.append(resp)
777 return result
777 return result
778
778
779 def local(self):
779 def local(self):
780 return self
780 return self
781
781
782 def cancopy(self):
782 def cancopy(self):
783 return self.local() # so statichttprepo's override of local() works
783 return self.local() # so statichttprepo's override of local() works
784
784
785 def join(self, f):
785 def join(self, f):
786 return os.path.join(self.path, f)
786 return os.path.join(self.path, f)
787
787
788 def wjoin(self, f):
788 def wjoin(self, f):
789 return os.path.join(self.root, f)
789 return os.path.join(self.root, f)
790
790
791 def file(self, f):
791 def file(self, f):
792 if f[0] == '/':
792 if f[0] == '/':
793 f = f[1:]
793 f = f[1:]
794 return filelog.filelog(self.sopener, f)
794 return filelog.filelog(self.sopener, f)
795
795
796 def changectx(self, changeid):
796 def changectx(self, changeid):
797 return self[changeid]
797 return self[changeid]
798
798
799 def parents(self, changeid=None):
799 def parents(self, changeid=None):
800 '''get list of changectxs for parents of changeid'''
800 '''get list of changectxs for parents of changeid'''
801 return self[changeid].parents()
801 return self[changeid].parents()
802
802
803 def setparents(self, p1, p2=nullid):
803 def setparents(self, p1, p2=nullid):
804 copies = self.dirstate.setparents(p1, p2)
804 copies = self.dirstate.setparents(p1, p2)
805 if copies:
805 if copies:
806 # Adjust copy records, the dirstate cannot do it, it
806 # Adjust copy records, the dirstate cannot do it, it
807 # requires access to parents manifests. Preserve them
807 # requires access to parents manifests. Preserve them
808 # only for entries added to first parent.
808 # only for entries added to first parent.
809 pctx = self[p1]
809 pctx = self[p1]
810 for f in copies:
810 for f in copies:
811 if f not in pctx and copies[f] in pctx:
811 if f not in pctx and copies[f] in pctx:
812 self.dirstate.copy(copies[f], f)
812 self.dirstate.copy(copies[f], f)
813
813
814 def filectx(self, path, changeid=None, fileid=None):
814 def filectx(self, path, changeid=None, fileid=None):
815 """changeid can be a changeset revision, node, or tag.
815 """changeid can be a changeset revision, node, or tag.
816 fileid can be a file revision or node."""
816 fileid can be a file revision or node."""
817 return context.filectx(self, path, changeid, fileid)
817 return context.filectx(self, path, changeid, fileid)
818
818
819 def getcwd(self):
819 def getcwd(self):
820 return self.dirstate.getcwd()
820 return self.dirstate.getcwd()
821
821
822 def pathto(self, f, cwd=None):
822 def pathto(self, f, cwd=None):
823 return self.dirstate.pathto(f, cwd)
823 return self.dirstate.pathto(f, cwd)
824
824
825 def wfile(self, f, mode='r'):
825 def wfile(self, f, mode='r'):
826 return self.wopener(f, mode)
826 return self.wopener(f, mode)
827
827
828 def _link(self, f):
828 def _link(self, f):
829 return os.path.islink(self.wjoin(f))
829 return os.path.islink(self.wjoin(f))
830
830
831 def _loadfilter(self, filter):
831 def _loadfilter(self, filter):
832 if filter not in self.filterpats:
832 if filter not in self.filterpats:
833 l = []
833 l = []
834 for pat, cmd in self.ui.configitems(filter):
834 for pat, cmd in self.ui.configitems(filter):
835 if cmd == '!':
835 if cmd == '!':
836 continue
836 continue
837 mf = matchmod.match(self.root, '', [pat])
837 mf = matchmod.match(self.root, '', [pat])
838 fn = None
838 fn = None
839 params = cmd
839 params = cmd
840 for name, filterfn in self._datafilters.iteritems():
840 for name, filterfn in self._datafilters.iteritems():
841 if cmd.startswith(name):
841 if cmd.startswith(name):
842 fn = filterfn
842 fn = filterfn
843 params = cmd[len(name):].lstrip()
843 params = cmd[len(name):].lstrip()
844 break
844 break
845 if not fn:
845 if not fn:
846 fn = lambda s, c, **kwargs: util.filter(s, c)
846 fn = lambda s, c, **kwargs: util.filter(s, c)
847 # Wrap old filters not supporting keyword arguments
847 # Wrap old filters not supporting keyword arguments
848 if not inspect.getargspec(fn)[2]:
848 if not inspect.getargspec(fn)[2]:
849 oldfn = fn
849 oldfn = fn
850 fn = lambda s, c, **kwargs: oldfn(s, c)
850 fn = lambda s, c, **kwargs: oldfn(s, c)
851 l.append((mf, fn, params))
851 l.append((mf, fn, params))
852 self.filterpats[filter] = l
852 self.filterpats[filter] = l
853 return self.filterpats[filter]
853 return self.filterpats[filter]
854
854
855 def _filter(self, filterpats, filename, data):
855 def _filter(self, filterpats, filename, data):
856 for mf, fn, cmd in filterpats:
856 for mf, fn, cmd in filterpats:
857 if mf(filename):
857 if mf(filename):
858 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
858 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
859 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
859 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
860 break
860 break
861
861
862 return data
862 return data
863
863
864 @propertycache
864 @propertycache
865 def _encodefilterpats(self):
865 def _encodefilterpats(self):
866 return self._loadfilter('encode')
866 return self._loadfilter('encode')
867
867
868 @propertycache
868 @propertycache
869 def _decodefilterpats(self):
869 def _decodefilterpats(self):
870 return self._loadfilter('decode')
870 return self._loadfilter('decode')
871
871
872 def adddatafilter(self, name, filter):
872 def adddatafilter(self, name, filter):
873 self._datafilters[name] = filter
873 self._datafilters[name] = filter
874
874
875 def wread(self, filename):
875 def wread(self, filename):
876 if self._link(filename):
876 if self._link(filename):
877 data = os.readlink(self.wjoin(filename))
877 data = os.readlink(self.wjoin(filename))
878 else:
878 else:
879 data = self.wopener.read(filename)
879 data = self.wopener.read(filename)
880 return self._filter(self._encodefilterpats, filename, data)
880 return self._filter(self._encodefilterpats, filename, data)
881
881
882 def wwrite(self, filename, data, flags):
882 def wwrite(self, filename, data, flags):
883 data = self._filter(self._decodefilterpats, filename, data)
883 data = self._filter(self._decodefilterpats, filename, data)
884 if 'l' in flags:
884 if 'l' in flags:
885 self.wopener.symlink(data, filename)
885 self.wopener.symlink(data, filename)
886 else:
886 else:
887 self.wopener.write(filename, data)
887 self.wopener.write(filename, data)
888 if 'x' in flags:
888 if 'x' in flags:
889 util.setflags(self.wjoin(filename), False, True)
889 util.setflags(self.wjoin(filename), False, True)
890
890
891 def wwritedata(self, filename, data):
891 def wwritedata(self, filename, data):
892 return self._filter(self._decodefilterpats, filename, data)
892 return self._filter(self._decodefilterpats, filename, data)
893
893
    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

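    # Hedged usage sketch (not in the original source): callers typically
    # bracket a transaction with close()/release(), e.g.
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       ...                # append to revlogs through tr
    #       tr.close()         # commit the journaled writes
    #   finally:
    #       tr.release()       # roll back if close() was never reached
    #
    # A nested call returns tr.nest(), so only the outermost transaction
    # actually commits or rolls back.
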
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

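    # Illustrative note (assumption): recover() backs the "hg recover"
    # command. It only replays .hg/store/journal left behind by an
    # interrupted transaction; completed transactions are undone via
    # rollback() and the "undo" files instead.
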
    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroyed(), which will prevent the branchhead cache from
        # being invalidated.
        self.destroyed()
        return 0

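    # Sketch of the undo.desc layout parsed above (derived from the format
    # written by _writejournal; the third line is optional):
    #
    #   42        <- repository length before the transaction (oldlen)
    #   commit    <- transaction description (desc)
    #   detail    <- optional extra detail shown in verbose mode
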
    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

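    # A minimal sketch of the distinction documented above (assumed usage):
    #
    #   repo.invalidatedirstate()   # lazy: reread only if the file changed
    #   repo.dirstate.invalidate()  # eager: unconditionally discard state
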
    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

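    # Hedged example of _afterlock() (hypothetical callback): the function
    # runs when the current store lock is released, or immediately if no
    # lock is held:
    #
    #   def notify():
    #       repo.ui.status('lock released\n')
    #   repo._afterlock(notify)
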
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

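    # Assumed locking convention, restated for clarity: when both locks are
    # needed, take wlock() before lock() (as rollback() above does) to avoid
    # lock-order inversions:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...
    #   finally:
    #       release(lock, wlock)
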
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

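    # Sketch of the rename metadata recorded above: committing 'bar' renamed
    # from 'foo' stores something like (illustrative values)
    #
    #   meta = {'copy': 'foo', 'copyrev': hex(<foo's filenode>)}
    #
    # with fparent1 set to nullid so readers know to consult the copy data.
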
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

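    # Illustrative (assumed) caller pattern for commit(): a None return
    # means there was nothing to commit.
    #
    #   node = repo.commit(text='fix bug', user='alice <a@example.com>')
    #   if node is None:
    #       ui.status('nothing changed\n')
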
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in the proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

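    # Hedged sketch: a strip-like caller that knows the candidate new heads
    # can pass them so the branchheads cache is updated instead of discarded:
    #
    #   repo.destroyed(newheadnodes=set([somenode]))
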
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

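    # For reference, the 7-tuple returned above unpacks as:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status()
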
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

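    # Worked example (illustrative): walking first parents from 'top' toward
    # 'bottom', the loop above samples nodes at exponentially growing
    # distances i == 1, 2, 4, 8, ..., so a 10-deep chain yields the nodes at
    # depths 1, 2, 4 and 8. This keeps the probe lists of the old discovery
    # protocol logarithmic in the length of history.
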
1728 def pull(self, remote, heads=None, force=False):
1728 def pull(self, remote, heads=None, force=False):
1729 # don't open transaction for nothing or you break future useful
1729 # don't open transaction for nothing or you break future useful
1730 # rollback call
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
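
    # Minimal usage sketch for pull() (hypothetical caller; assumes an
    # hg.peer-style factory for building the remote peer):
    #
    #     other = hg.peer(repo.ui, {}, 'http://example.com/repo')
    #     result = repo.pull(other)
    #     # result is 0 when no changes were found, otherwise the
    #     # head-count encoding documented on addchangegroup()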

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass
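
    # A hypothetical extension sketch overriding checkpush(), using the
    # same reposetup pattern as the tests below (illustrative only):
    #
    #     def reposetup(ui, repo):
    #         if not repo.local():
    #             return
    #         class checkedrepo(repo.__class__):
    #             def checkpush(self, force, revs):
    #                 super(checkedrepo, self).checkpush(force, revs)
    #                 if revs is None and not force:
    #                     raise util.Abort('explicit revisions required')
    #         repo.__class__ = checkedrepo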

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
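        # Interpreting the return value (illustrative sketch):
        #
        #     ret = repo.push(other)
        #     if ret is None:
        #         pass        # nothing to push
        #     elif ret == 0:
        #         pass        # HTTP error from the remote
        #     else:
        #         pass        # head-count delta, see addchangegroup()
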
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if self.obsstore:
                            # these messages are defined here to stay within
                            # the 80-char line limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If we are about to push and there is at least
                            # one obsolete or unstable changeset in missing,
                            # at least one of the missing heads will be
                            # obsolete or unstable. So checking heads only
                            # is ok.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. Synchronize all common.
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
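                    # Worked example (hypothetical DAG A-B-C-D, pushing
                    # revs=[D] with common={A,B}, missing={C,D}): D is not
                    # in common, so the list above stays empty;
                    # roots(missing) is C, whose parent B is a commonhead,
                    # so the revset yields B and cheads ends up [B] --
                    # exactly heads(::D and ::B).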
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public
                    # here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
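        # Usage sketch (illustrative; node values are hypothetical):
        #
        #     cg = repo.changegroupsubset([basenode], [headnode], 'pull')
        #     data = cg.read(4096)   # stream successive changegroup bytes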
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
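        # Illustrative call (argument names hypothetical):
        #
        #     cg = repo.getbundle('pull', heads=wantedheads, common=knownnodes)
        #     # None when nothing is missing, else a changegroup chunkbuffer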
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
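        # Worked example of the encoding (hypothetical head counts):
        # 2 heads before and 4 after gives dh = +2 and returns 3; 2 before
        # and 1 after gives dh = -1 and returns -2; an unchanged head
        # count returns 1. 0 is reserved for "nothing changed" (see the
        # end of this method).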
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
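
    # The stream_out wire format consumed above, sketched from the parsing
    # code (illustrative; see the protocol implementation for authority):
    #
    #     <resp>\n                        # 0 ok, 1 forbidden, 2 lock failed
    #     <total_files> <total_bytes>\n
    #     <name>\0<size>\n                # repeated total_files times,
    #     <size raw bytes>                # each followed by file contents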

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
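
    # Usage sketch (hypothetical peer; requesting heads forces the pull
    # path):
    #
    #     other = hg.peer(repo.ui, {}, source)
    #     repo.clone(other, stream=True)   # streams when capabilities allow,
    #                                      # otherwise falls back to pull()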

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
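
    # Illustrative pushkey round-trip over namespaces used elsewhere in
    # this file (key and node values hypothetical):
    #
    #     repo.listkeys('namespaces')                  # discover namespaces
    #     marks = repo.listkeys('bookmarks')           # {'name': hexnode}
    #     repo.pushkey('bookmarks', 'stable', oldhex, newhex)  # truthy on ok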

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,508 +1,508 b''
1 $ cat >> $HGRCPATH << EOF
1 $ cat >> $HGRCPATH << EOF
2 > [extensions]
2 > [extensions]
3 > graphlog=
3 > graphlog=
4 > [phases]
4 > [phases]
5 > # public changeset are not obsolete
5 > # public changeset are not obsolete
6 > publish=false
6 > publish=false
7 > EOF
7 > EOF
8 $ mkcommit() {
8 $ mkcommit() {
9 > echo "$1" > "$1"
9 > echo "$1" > "$1"
10 > hg add "$1"
10 > hg add "$1"
11 > hg ci -m "add $1"
11 > hg ci -m "add $1"
12 > }
12 > }
13 $ getid() {
13 $ getid() {
14 > hg id --debug -ir "desc('$1')"
14 > hg id --debug -ir "desc('$1')"
15 > }
15 > }
16
16
17 $ cat > debugkeys.py <<EOF
17 $ cat > debugkeys.py <<EOF
18 > def reposetup(ui, repo):
18 > def reposetup(ui, repo):
19 > class debugkeysrepo(repo.__class__):
19 > class debugkeysrepo(repo.__class__):
20 > def listkeys(self, namespace):
20 > def listkeys(self, namespace):
21 > ui.write('listkeys %s\n' % (namespace,))
21 > ui.write('listkeys %s\n' % (namespace,))
22 > return super(debugkeysrepo, self).listkeys(namespace)
22 > return super(debugkeysrepo, self).listkeys(namespace)
23 >
23 >
24 > if repo.local():
24 > if repo.local():
25 > repo.__class__ = debugkeysrepo
25 > repo.__class__ = debugkeysrepo
26 > EOF
26 > EOF
27
27
28 $ hg init tmpa
28 $ hg init tmpa
29 $ cd tmpa
29 $ cd tmpa
30 $ mkcommit kill_me
30 $ mkcommit kill_me
31
31
32 Checking that the feature is properly disabled
32 Checking that the feature is properly disabled
33
33
34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
35 abort: obsolete feature is not enabled on this repo
35 abort: obsolete feature is not enabled on this repo
36 [255]
36 [255]
37
37
38 Enabling it
38 Enabling it
39
39
40 $ cat > ../obs.py << EOF
40 $ cat > ../obs.py << EOF
41 > import mercurial.obsolete
41 > import mercurial.obsolete
42 > mercurial.obsolete._enabled = True
42 > mercurial.obsolete._enabled = True
43 > EOF
43 > EOF
44 $ echo '[extensions]' >> $HGRCPATH
44 $ echo '[extensions]' >> $HGRCPATH
45 $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH
45 $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH
46
46
Killing a single changeset without replacement

$ hg debugobsolete 0
abort: changeset references must be full hexadecimal node identifiers
[255]
$ hg debugobsolete '00'
abort: changeset references must be full hexadecimal node identifiers
[255]
$ hg debugobsolete -d '0 0' `getid kill_me` -u babar
$ hg debugobsolete
97b7c2d76b1845ed3eb988cd612611e72406cef0 0 {'date': '0 0', 'user': 'babar'}
$ cd ..

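Each line printed by `hg debugobsolete` above has one shape: precursor node, zero or more successor nodes, a flags integer, then a metadata dict. A rough sketch of splitting the printed form apart (this parses only the display format shown here, not the binary obsstore encoding):

def parsemarkerline(line):
    head, _, meta = line.partition(' {')
    fields = head.split()
    precursor = fields[0]
    successors = fields[1:-1]   # empty when a changeset is killed outright
    flags = int(fields[-1])
    return precursor, successors, flags, '{' + meta

line = ("97b7c2d76b1845ed3eb988cd612611e72406cef0 0 "
        "{'date': '0 0', 'user': 'babar'}")
print parsemarkerline(line)   # ('97b7...cef0', [], 0, "{'date': ...}")
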
Killing a single changeset with replacement

$ hg init tmpb
$ cd tmpb
$ mkcommit a
$ mkcommit b
$ mkcommit original_c
$ hg up "desc('b')"
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ mkcommit new_c
created new head
$ hg debugobsolete `getid original_c` `getid new_c` -d '56 12'
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}

do it again (it reads the obsstore before adding new changesets)

$ hg up '.^'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ mkcommit new_2_c
created new head
$ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}

Register two markers with a missing node

$ hg up '.^'
0 files updated, 0 files merged, 1 files removed, 0 files unresolved
$ mkcommit new_3_c
created new head
$ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
$ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

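These markers now chain original_c -> new_c -> new_2_c -> 1337... -> new_3_c, and the node in the middle never has to exist locally. A toy walk of such a chain to its latest successors (my own illustration, not Mercurial's implementation):

def latestsuccessors(node, markers):
    # markers: precursor -> list of successor nodes
    succs = markers.get(node)
    if not succs:
        return [node]   # no marker: the node is its own latest version
    result = []
    for s in succs:
        result.extend(latestsuccessors(s, markers))
    return result

markers = {'original_c': ['new_c'],
           'new_c': ['new_2_c'],
           'new_2_c': ['1337' * 10],   # marker may point at a missing node
           '1337' * 10: ['new_3_c']}
print latestsuccessors('original_c', markers)   # ['new_3_c']
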
Check that graphlog detects that a changeset is obsolete:

$ hg glog
@ changeset: 5:5601fb93a350
| tag: tip
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a


Check that public changesets are not accounted as obsolete:

$ hg phase --public 2
$ hg --config 'extensions.graphlog=' glog
@ changeset: 5:5601fb93a350
| tag: tip
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
| o changeset: 2:245bde4270cd
|/ user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a


$ cd ..

Exchange Test
============================

Destination repo does not have any data
---------------------------------------

Try to pull markers
(extinct changesets are excluded but markers are pushed)

$ hg init tmpc
$ cd tmpc
$ hg pull ../tmpb
pulling from ../tmpb
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

Rollback//Transaction support

$ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 {'date': '1340 0', 'user': 'test'}
$ hg rollback -n
repository tip rolled back to revision 3 (undo debugobsolete)
$ hg rollback
repository tip rolled back to revision 3 (undo debugobsolete)
$ hg debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

$ cd ..

Try to push markers

$ hg init tmpd
$ hg -R tmpb push tmpd
pushing to tmpd
searching for changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
$ hg -R tmpd debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

Check that obsolete keys are exchanged only if the source has an obsolete store

$ hg init empty
$ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
pushing to tmpd
no changes found
listkeys phases
listkeys bookmarks
[1]

clone support
(markers are copied and extinct changesets are included to allow hardlinks)

$ hg clone tmpb clone-dest
updating to branch default
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R clone-dest log -G --hidden
@ changeset: 5:5601fb93a350
| tag: tip
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
| x changeset: 4:ca819180edb9
|/ parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_2_c
|
| x changeset: 3:cdbce2fbb163
|/ parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_c
|
| o changeset: 2:245bde4270cd
|/ user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a

$ hg -R clone-dest debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}


Destination repo has existing data
---------------------------------------

On pull

$ hg init tmpe
$ cd tmpe
$ hg debugobsolete -d '1339 0' 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339
$ hg pull ../tmpb
pulling from ../tmpb
requesting all changes
adding changesets
adding manifests
adding file changes
added 4 changesets with 4 changes to 4 files (+1 heads)
(run 'hg heads' to see heads, 'hg merge' to merge)
$ hg debugobsolete
2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}


On push

$ hg push ../tmpc
pushing to ../tmpc
searching for changes
no changes found
[1]
$ hg -R ../tmpc debugobsolete
245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}

detect outgoing obsolete and unstable
---------------------------------------


$ hg glog
o changeset: 3:5601fb93a350
| tag: tip
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
| o changeset: 2:245bde4270cd
|/ user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a

$ hg up 'desc("new_3_c")'
3 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ mkcommit original_d
$ mkcommit original_e
$ hg debugobsolete `getid original_d` -d '0 0'
$ hg log -r 'obsolete()'
changeset: 4:7c694bff0650
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add original_d

$ hg glog -r '::unstable()'
@ changeset: 5:6e572121998e
| tag: tip
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_e
|
x changeset: 4:7c694bff0650
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_d
|
o changeset: 3:5601fb93a350
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a


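Here "unstable" means a changeset that is not obsolete itself but is built on top of an obsolete ancestor (original_e sits on the obsolete original_d). A toy model of that rule, ignoring phases and other details the real implementation handles:

def isunstable(node, parents, obsoletes):
    # non-obsolete node with an obsolete ancestor somewhere below it
    if node in obsoletes:
        return False
    stack = list(parents.get(node, []))
    while stack:
        p = stack.pop()
        if p in obsoletes:
            return True
        stack.extend(parents.get(p, []))
    return False

parents = {'original_e': ['original_d'], 'original_d': ['new_3_c']}
print isunstable('original_e', parents, set(['original_d']))   # True
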
refuse to push obsolete changeset

$ hg push ../tmpc/ -r 'desc("original_d")'
pushing to ../tmpc/
searching for changes
abort: push includes an obsolete changeset: 7c694bff0650!
[255]

refuse to push unstable changeset

$ hg push ../tmpc/
pushing to ../tmpc/
searching for changes
abort: push includes an unstable changeset: 6e572121998e!
[255]

Test that extinct changesets are properly detected

$ hg log -r 'extinct()'

Don't try to push extinct changesets

$ hg init ../tmpf
$ hg out ../tmpf
comparing with ../tmpf
searching for changes
changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a

changeset: 1:7c3bad9141dc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add b

changeset: 2:245bde4270cd
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add original_c

changeset: 3:5601fb93a350
parent: 1:7c3bad9141dc
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add new_3_c

changeset: 4:7c694bff0650
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add original_d

changeset: 5:6e572121998e
tag: tip
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add original_e

$ hg push ../tmpf -f # -f because we push unstable too
pushing to ../tmpf
searching for changes
adding changesets
adding manifests
adding file changes
added 6 changesets with 6 changes to 6 files (+1 heads)

no warning displayed

$ hg push ../tmpf
pushing to ../tmpf
searching for changes
no changes found
[1]

Do not warn about new head when the new head is a successor of a remote one

$ hg glog
@ changeset: 5:6e572121998e
| tag: tip
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_e
|
x changeset: 4:7c694bff0650
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_d
|
o changeset: 3:5601fb93a350
| parent: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add new_3_c
|
| o changeset: 2:245bde4270cd
|/ user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add original_c
|
o changeset: 1:7c3bad9141dc
| user: test
| date: Thu Jan 01 00:00:00 1970 +0000
| summary: add b
|
o changeset: 0:1f0dee641bb7
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add a

$ hg up -q 'desc(new_3_c)'
$ mkcommit obsolete_e
created new head
$ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
$ hg push ../tmpf
pushing to ../tmpf
searching for changes
adding changesets
adding manifests
adding file changes
added 1 changesets with 1 changes to 1 files (+1 heads)

Checking _enable=False warning if obsolete marker exists

$ echo '[extensions]' >> $HGRCPATH
$ echo "obs=!" >> $HGRCPATH
$ hg log -r tip
obsolete feature not enabled but 7 markers found!
changeset: 6:d6a026544050
tag: tip
parent: 3:5601fb93a350
user: test
date: Thu Jan 01 00:00:00 1970 +0000
summary: add obsolete_e
