push: refuse to push bumped changeset...
Pierre-Yves David
r17834:743d04dd default
@@ -1,2629 +1,2632 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)
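
# Editorial sketch (an assumption, not part of this changeset): filecache-style
# decorators memoize a method as an attribute and recompute it only when the
# stat info of the backing file changes; storecache merely resolves the file
# name inside .hg/store via sjoin, so a property can be tied to a store file:
#
#   @storecache('00changelog.i')
#   def changelog(self):
#       ...   # re-read when .hg/store/00changelog.i changes on disk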

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()
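
    # For reference (reconstructed from the write loop above): .hg/requires
    # simply lists one requirement per line, e.g.:
    #
    #   revlogv1
    #   store
    #   fncache
    #   dotencode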

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
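
    # Worked example with hypothetical paths: if self.root is '/repo' and
    # path is '/repo/sub/deep/x', then subpath is 'sub/deep/x'. The loop
    # tries 'sub/deep/x', then 'sub/deep', then 'sub'; if 'sub' is a subrepo
    # in ctx.substate, the rest of the check is delegated as
    # ctx.sub('sub').checknested('deep/x').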

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads
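
    # Example: bookmarkheads('foo') also collects divergent bookmarks such
    # as 'foo@remote', since both sides are reduced to the base name before
    # the '@' suffix.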

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that the hiddenrevs set needs invalidation when
        - a new changeset is added (possibly unstable above an extinct one)
        - a new obsolete marker is added (possibly a new extinct changeset)

        hidden changesets cannot have non-hidden descendants
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden
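
    # Reading the revset above: start from extinct() (obsolete changesets
    # with no non-obsolete descendants) and subtract the ancestors of the
    # working directory parent and of any bookmark. A comment sketch of the
    # equivalent explicit call:
    #
    #   hidden = set(repo.revs('extinct() - ::(. + bookmark())'))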

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)
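
    # Usage sketch: indexing a repository yields context objects, e.g.
    #
    #   repo[None]    # workingctx for the working directory
    #   repo['tip']   # changectx for the tip changeset
    #   repo[0]       # changectx for revision 0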

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
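
    # Usage sketch (illustrative arguments): revset.formatspec escapes the
    # extra arguments into the expression, e.g.
    #
    #   revs = repo.revs('%d:: and not %d::', 4, 6)   # list of rev numbers
    #   for ctx in repo.set('modifies(%s)', 'README'):
    #       ...                                       # one context per match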

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
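
    # For reference: each entry appended to .hgtags (or .hg/localtags) by
    # writetags above is a line of the form '<hex node> <tag name>', e.g.
    # with a made-up node:
    #
    #   1234567890abcdef1234567890abcdef12345678 release-1.0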

    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded, so we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
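
    # For reference (reconstructed from the parsing above): cache/branchheads
    # starts with a '<tip node hex> <tip rev>' line, followed by one
    # '<head node hex> <branch name>' line per branch head, e.g. with
    # placeholder nodes:
    #
    #   <tip node hex> 42
    #   <head node hex> default
    #   <head node hex> stable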

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened).  Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs.  This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable.  If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped.  This code filters them out.  Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]
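
    # Worked example of the pruning above, for the history
    # 1 (branch a) -> 2 (branch b) -> 3 (branch a): when 3 arrives as a new
    # candidate head for branch a, bheadrevs is [1, 3]; the loop pops 3,
    # finds 1 among its ancestors and drops it, leaving [3]. Checking only
    # parents would have missed this, because 3's parent is on branch b.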

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result
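
    # Example: known() answers per-node membership for discovery; a node
    # that is absent from the changelog, or whose phase is secret, is
    # reported as unknown so it is never advertised to peers.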

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
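
    # Hypothetical hgrc sketch of such filters (an assumption based on the
    # hgrc documentation, not on this diff): the section name is the filter
    # ('encode' or 'decode'), patterns map to shell commands, and '!'
    # disables an entry:
    #
    #   [encode]
    #   *.txt = pipe: dos2unix
    #   [decode]
    #   *.txt = !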

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
917
917
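    # Illustrative usage sketch (not part of the original module):
    #
    #   tr = repo.transaction('my-operation')
    #   try:
    #       ...                # append data to revlogs through tr
    #       tr.close()         # commit the journal
    #   finally:
    #       tr.release()       # no-op if closed, rollback otherwise
    #
    # Callers that find a transaction already running get tr.nest(), so only
    # the outermost close()/release() pair finalizes or aborts the journal.
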
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

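    # Note (added for clarity): when a transaction closes successfully,
    # aftertrans() renames every journal.* file listed above to its undo.*
    # counterpart (see undoname), which is what a later 'hg rollback'
    # consumes; 'hg recover' instead replays a journal left behind by an
    # interrupted transaction.
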
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

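    # Illustrative call (not part of the original module): a dry run reports
    # what would be undone without touching anything.
    #
    #   if repo.rollback(dryrun=True) == 0:
    #       repo.rollback()
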
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroyed(), which will prevent the branchhead cache from
        # being invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname,
                          int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

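    # Note (added for clarity): the first lock.lock() call uses a timeout of
    # zero so an already-held lock raises LockHeld immediately; only with
    # wait=True do we retry using the configurable timeout, e.g. in hgrc:
    #
    #   [ui]
    #   timeout = 30
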
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

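    # Note (added for clarity): callers that need both locks acquire wlock()
    # before lock() and release them in reverse order, as rollback() above
    # does:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...
    #   finally:
    #       release(lock, wlock)
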
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

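    # Note (added for clarity): _filecommit() returns the filelog node to
    # record in the new manifest -- a freshly added revision when content,
    # flags or copy metadata changed, otherwise the unchanged first-parent
    # node.
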
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

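    # Minimal usage sketch (illustrative, not part of the original module):
    #
    #   node = repo.commit(text='fix frobnication',
    #                      user='alice <alice@example.org>')
    #   if node is None:
    #       print 'nothing changed'
    #
    # commit() gathers status from the working directory and delegates the
    # actual changelog/manifest/filelog writes to commitctx() below.
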
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branch head cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache,
        # do it. Otherwise, since nodes were destroyed, the cache is stale
        # and this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

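    # Note (added for clarity): the result is a 7-tuple of sorted lists,
    # (modified, added, removed, deleted, unknown, ignored, clean), in the
    # same order 'hg status' reports them.
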
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

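    # Note (added for clarity): for each (top, bottom) pair this follows
    # first parents from top towards bottom, keeping nodes at exponentially
    # growing distances (1, 2, 4, 8, ...); the legacy discovery protocol
    # uses this sparse sample to locate common ancestors without walking
    # the whole chain over the wire.
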
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible,
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset,
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

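    # Editorial sketch, not part of the original file: how a caller drives
    # pull(). The peer setup mirrors what the pull command does, simplified;
    # 'repo' and 'source' are assumed names from the caller's context:
    #
    #     other = hg.peer(repo, {}, source)   # open the remote repository
    #     r = repo.pull(other, heads=None, force=False)
    #     # r is addchangegroup()'s return value: 0 when nothing changed,
    #     # otherwise a head-count delta encoding (see addchangegroup below)
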
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

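    # Editorial sketch, assumed rather than taken from this file: how an
    # extension can hook checkpush(). The mq extension does something along
    # these lines to refuse pushing a repository with applied patches:
    #
    #     class mqrepo(repo.__class__):
    #         def checkpush(self, force, revs):
    #             if self.mq.applied and not force:
    #                 raise util.Abort(_('source has mq patches applied'))
    #             super(mqrepo, self).checkpush(force, revs)
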
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes the local user can lock the remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes the local user cannot lock the remote repo
        # (new ssh servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty --> no obsolete markers,
                        # so we can skip the iteration entirely
                        if self.obsstore:
                            # these messages are pulled out here to stay
                            # within the 80 char limit below
                            mso = _("push includes obsolete changeset: %s!")
                            msu = _("push includes unstable changeset: %s!")
                            msb = _("push includes bumped changeset: %s!")
                            # If we are going to push anything and missing
                            # contains at least one obsolete, unstable or
                            # bumped changeset, then at least one missing
                            # head has that state too, so checking heads
                            # only is ok.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                                elif ctx.bumped():
                                    raise util.Abort(_(msb) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # the entire push failed, synchronize on all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on remote but
                    # public here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly made of roots; we may want to ensure that,
                    # XXX but it is costly.
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

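    # Editorial note, not in the original file: with the bumped guard added
    # above, a non-forced push of a troubled head now aborts, for example:
    #
    #     $ hg push
    #     abort: push includes bumped changeset: 5601fb93a350!
    #
    # The hash is illustrative. All three checks live under 'if not force',
    # so 'hg push --force' still lets such changesets through.
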
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

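    # Editorial example with an assumed graph, not from the source: given
    # history c <- a <- h, getbundle('pull', heads=[h], common=[c]) bundles
    # exactly the ancestors of h that are not ancestors of c, i.e. a and h.
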
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

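    # Editorial note on the 'UN' argument above, my reading rather than a
    # comment from the file: bundle10/unbundle10 implement the changegroup-v1
    # wire format, and 'UN' names the compression scheme (none), matching
    # the 'HG10UN' header used for uncompressed on-disk bundles.
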
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate that incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store the file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                self.updatebranchcache()
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

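    # Editorial worked example of the return encoding above, not from the
    # source: going from 2 heads to 4 gives dh == 2 and a return value of 3;
    # going from 3 heads to 2 gives dh == -1 and a return value of -2; an
    # unchanged head count returns 1. Zero is reserved for "nothing
    # changed", so every successful addition stays truthy.
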
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save the remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                self.branchcache = rbranchmap
                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    self._writebranchcache(self.branchcache,
                                           self[rtiprev].node(), rtiprev)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

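    # Editorial sketch of the stream_out wire format parsed above, assuming
    # a well-behaved server: one status line ('0' on success), one
    # '<filecount> <bytecount>' line, then for each file a
    # '<storepath>' NUL '<size>' line followed by exactly <size> raw bytes:
    #
    #     0
    #     2 12345
    #     data/foo.i<NUL>8192
    #     ...8192 bytes of revlog data...
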
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

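    # Editorial summary, not a comment from the file: clone() streams when
    # the server advertises 'stream-preferred', then tries plain 'stream'
    # (revlogv1 only), then 'streamreqs' (a format list such as
    # 'revlogv1,generaldelta') when requirements must match, and otherwise
    # falls back to a regular pull.
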
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

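# Editorial example, not from the source: undoname() maps a journal file to
# its post-transaction undo name, replacing only the first occurrence:
#
#     undoname('.hg/journal.dirstate')  ->  '.hg/undo.dirstate'
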
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,272 +1,273 b''
Check that obsolete changesets are properly stripped from heads
  $ cat > obs.py << EOF
  > import mercurial.obsolete
  > mercurial.obsolete._enabled = True
  > EOF
  $ cat >> $HGRCPATH << EOF
  > [phases]
  > # public changeset are not obsolete
  > publish=false
  > [ui]
  > logtemplate='{node|short} ({phase}) {desc|firstline}\n'
  > [extensions]
  > graphlog=
  > EOF
  $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH
  $ mkcommit() {
  >    echo "$1" > "$1"
  >    hg add "$1"
  >    hg ci -m "add $1"
  > }
  $ getid() {
  >    hg id --debug -ir "desc('$1')"
  > }


  $ hg init remote
  $ cd remote
  $ mkcommit base
  $ hg phase --public .
  $ cd ..
  $ cp -r remote base
  $ hg clone remote local
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd local

New head replaces old head
==========================

setup
+(we add flag 1 to prevent a bumped error during the test)

  $ mkcommit old
  $ hg push
  pushing to $TESTTMP/remote (glob)
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  $ hg up -q '.^'
  $ mkcommit new
  created new head
-  $ hg debugobsolete `getid old` `getid new`
+  $ hg debugobsolete --flags 1 `getid old` `getid new`
  $ hg glog --hidden
  @  71e3228bffe1 (draft) add new
  |
  | x  c70b08862e08 (draft) add old
  |/
  o  b4952fcf48cf (public) add base

  $ cp -r ../remote ../backup1

old exists remotely as draft. It is obsoleted by new, which we now push.
Push should not warn about creating new head

  $ hg push
  pushing to $TESTTMP/remote (glob)
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)

old head is now public (public local version)
=============================================

setup

  $ rm -fr ../remote
  $ cp -r ../backup1 ../remote
  $ hg -R ../remote phase --public c70b08862e08
  $ hg pull -v
  pulling from $TESTTMP/remote (glob)
  searching for changes
  no changes found
  $ hg glog --hidden
  @  71e3228bffe1 (draft) add new
  |
  | o  c70b08862e08 (public) add old
  |/
  o  b4952fcf48cf (public) add base


Abort: old will still be a head because it's public.

  $ hg push
  pushing to $TESTTMP/remote (glob)
  searching for changes
  abort: push creates new remote head 71e3228bffe1!
  (did you forget to merge? use push -f to force)
  [255]
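
Taken together, these scenarios pin down the rule the head check follows; a toy restatement (ours, for illustration only; the real logic lives in Mercurial's discovery code, and ordinary child-replaces-parent head turnover is ignored here):

    # Toy model: when may a remote head be discounted from the
    # "push creates new remote head" warning?
    def discountable(head, obsoleted_by_push, public):
        # Only a mutable (draft) head with a pushed or already-known
        # successor may be explained away; a public head can never be
        # obsoleted.
        return head in obsoleted_by_push and head not in public

With old public, discountable() is False, so pushing new still trips the abort above.
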

old head is now public (public remote version)
==============================================

TODO: Not implemented yet.

# setup
#
# $ rm -fr ../remote
# $ cp -r ../backup1 ../remote
# $ hg -R ../remote phase --public c70b08862e08
# $ hg phase --draft --force c70b08862e08
# $ hg glog --hidden
# @  71e3228bffe1 (draft) add new
# |
# | x  c70b08862e08 (draft) add old
# |/
# o  b4952fcf48cf (public) add base
#
#
#
# Abort: old will still be a head because it's public.
#
# $ hg push
# pushing to $TESTTMP/remote
# searching for changes
# abort: push creates new remote head 71e3228bffe1!
# (did you forget to merge? use push -f to force)
# [255]

old head is obsolete but replacement is not pushed
==================================================

setup

  $ rm -fr ../remote
  $ cp -r ../backup1 ../remote
  $ hg phase --draft --force '(0::) - 0'
  $ hg up -q '.^'
  $ mkcommit other
  created new head
  $ hg glog --hidden
  @  d7d41ccbd4de (draft) add other
  |
  | o  71e3228bffe1 (draft) add new
  |/
  | x  c70b08862e08 (draft) add old
  |/
  o  b4952fcf48cf (public) add base


old exists remotely as draft. It is obsoleted by new but we don't push new.
Push should abort on new head

  $ hg push -r 'desc("other")'
  pushing to $TESTTMP/remote (glob)
  searching for changes
  abort: push creates new remote head d7d41ccbd4de!
  (did you forget to merge? use push -f to force)
  [255]



Both precursors and successors are already known remotely. Descendant adds heads
===============================================================================

setup (the obsolete marker is known locally only)

  $ cd ..
  $ rm -rf local
  $ hg clone remote local
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd local
  $ mkcommit old
  old already tracked!
  nothing changed
  [1]
  $ hg up -q '.^'
  $ mkcommit new
  created new head
  $ hg push -f
  pushing to $TESTTMP/remote (glob)
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  $ mkcommit desc1
  $ hg up -q '.^'
  $ mkcommit desc2
  created new head
  $ hg debugobsolete `getid old` `getid new`
  $ hg glog --hidden
  @  5fe37041cc2b (draft) add desc2
  |
  | o  a3ef1d111c5f (draft) add desc1
  |/
  o  71e3228bffe1 (draft) add new
  |
  | x  c70b08862e08 (draft) add old
  |/
  o  b4952fcf48cf (public) add base

  $ hg glog --hidden -R ../remote
  o  71e3228bffe1 (draft) add new
  |
  | o  c70b08862e08 (draft) add old
  |/
  @  b4952fcf48cf (public) add base

  $ cp -r ../remote ../backup2

Push should not warn about adding new heads. We create one, but we'll delete
one anyway.

  $ hg push
  pushing to $TESTTMP/remote (glob)
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files (+1 heads)
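
This is the accounting the narration above describes, worked with the short hashes from the two logs: the push adds one head but removes one, so the remote head count is unchanged and no warning fires.

    # Worked head count for this push (values from the logs above):
    before = {'71e3228bffe1', 'c70b08862e08'}   # remote heads pre-push
    # c70b08862e08: draft obsoleted by the already-known 71e3228bffe1
    # 71e3228bffe1: gains children desc1/desc2, so no longer a head
    after = {'a3ef1d111c5f', '5fe37041cc2b'}    # desc1, desc2
    assert len(after) == len(before)            # no new-head warning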


Remote head is unknown but obsoleted by a local changeset
=========================================================

setup

  $ rm -fr ../remote
  $ cp -r ../backup1 ../remote
  $ cd ..
  $ rm -rf local
  $ hg clone remote local -r 0
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  updating to branch default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd local
  $ mkcommit new
  $ hg -R ../remote id --debug -r tip
  c70b08862e0838ea6d7c59c85da2f1ed6c8d67da tip
  $ hg id --debug -r tip
  71e3228bffe1886550777233d6c97bb5a6b2a650 tip
  $ hg debugobsolete c70b08862e0838ea6d7c59c85da2f1ed6c8d67da 71e3228bffe1886550777233d6c97bb5a6b2a650
  $ hg glog --hidden
  @  71e3228bffe1 (draft) add new
  |
  o  b4952fcf48cf (public) add base

  $ hg glog --hidden -R ../remote
  o  c70b08862e08 (draft) add old
  |
  @  b4952fcf48cf (public) add base


Push should not complain about new heads.

It should not complain about "unsynced remote changes!" either, but that's not
handled yet.

  $ hg push --traceback
  pushing to $TESTTMP/remote (glob)
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
@@ -1,584 +1,592 @@
  $ cat >> $HGRCPATH << EOF
  > [extensions]
  > graphlog=
  > [phases]
  > # public changeset are not obsolete
  > publish=false
  > EOF
  $ mkcommit() {
  >    echo "$1" > "$1"
  >    hg add "$1"
  >    hg ci -m "add $1"
  > }
  $ getid() {
  >    hg id --debug -ir "desc('$1')"
  > }

  $ cat > debugkeys.py <<EOF
  > def reposetup(ui, repo):
  >     class debugkeysrepo(repo.__class__):
  >         def listkeys(self, namespace):
  >             ui.write('listkeys %s\n' % (namespace,))
  >             return super(debugkeysrepo, self).listkeys(namespace)
  >
  >     if repo.local():
  >         repo.__class__ = debugkeysrepo
  > EOF

  $ hg init tmpa
  $ cd tmpa
  $ mkcommit kill_me

Checking that the feature is properly disabled

  $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
  abort: obsolete feature is not enabled on this repo
  [255]

Enabling it

  $ cat > ../obs.py << EOF
  > import mercurial.obsolete
  > mercurial.obsolete._enabled = True
  > EOF
  $ echo '[extensions]' >> $HGRCPATH
  $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH

Killing a single changeset without replacement

  $ hg debugobsolete 0
  abort: changeset references must be full hexadecimal node identifiers
  [255]
  $ hg debugobsolete '00'
  abort: changeset references must be full hexadecimal node identifiers
  [255]
  $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
  $ hg debugobsolete
  97b7c2d76b1845ed3eb988cd612611e72406cef0 0 {'date': '0 0', 'user': 'babar'}
  $ cd ..
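
The marker dump above has a fixed shape: precursor node, zero or more successor nodes, a flags field, then a metadata dict. A hedged parsing helper for that textual output (the helper is ours; the format is simply what the command prints, and as later lines in this test show, flags print in hex, so 'C' is 12):

    # Parse one `hg debugobsolete` output line, e.g.
    #   97b7c2d7...cef0 0 {'date': '0 0', 'user': 'babar'}
    import ast

    def parsemarkerline(line):
        head, meta = line.split(' {', 1)
        parts = head.split()
        precursor = parts[0]            # node being replaced
        successors = parts[1:-1]        # may be empty: a plain kill
        flags = int(parts[-1], 16)      # hex field: 'C' == 12, '0' == 0
        metadata = ast.literal_eval('{' + meta)
        return precursor, successors, flags, metadata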

Killing a single changeset with replacement

  $ hg init tmpb
  $ cd tmpb
  $ mkcommit a
  $ mkcommit b
  $ mkcommit original_c
  $ hg up "desc('b')"
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ mkcommit new_c
  created new head
  $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
  $ hg debugobsolete --flag 12 `getid original_c` `getid new_c` -d '56 12'
  $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
  2:245bde4270cd add original_c
  $ hg debugobsolete
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}

do it again (it reads the obsstore before adding new changesets)

  $ hg up '.^'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ mkcommit new_2_c
  created new head
  $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
  $ hg debugobsolete
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}

Register two markers with a missing node

  $ hg up '.^'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ mkcommit new_3_c
  created new head
  $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
  $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
  $ hg debugobsolete
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}

Refuse pathological nullid successors
  $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
  transaction abort!
  rollback completed
  abort: bad obsolescence marker detected: invalid successors nullid
  [255]

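The abort above enforces a basic store invariant: nullid can never be a successor (a changeset may be killed by listing no successors at all, as with kill_me earlier, but never "replaced by the null node"). A toy restatement, ours rather than the hg source:

    # Toy version of the invariant behind the abort above.
    NULLID = '\0' * 20                     # binary form of 000...000

    def checksuccessors(successors):
        if NULLID in successors:
            raise ValueError('bad obsolescence marker detected: '
                             'invalid successors nullid')
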
Check that graphlog detects that a changeset is obsolete:

  $ hg glog
  @  changeset:   5:5601fb93a350
  |  tag:         tip
  |  parent:      1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add new_3_c
  |
  o  changeset:   1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add b
  |
  o  changeset:   0:1f0dee641bb7
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     add a


Check that public changesets are not accounted as obsolete:

  $ hg phase --public 2
  $ hg --config 'extensions.graphlog=' glog
  @  changeset:   5:5601fb93a350
  |  tag:         tip
  |  parent:      1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add new_3_c
  |
  | o  changeset:   2:245bde4270cd
  |/   user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     add original_c
  |
  o  changeset:   1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add b
  |
  o  changeset:   0:1f0dee641bb7
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     add a


And that bumped changesets are detected
---------------------------------------

If we didn't filter obsolete changesets out, 3 and 4 would show up too. Also
note that the bumped changeset (5:5601fb93a350) is not a direct successor of
the public changeset.

  $ hg log --hidden -r 'bumped()'
  changeset:   5:5601fb93a350
  tag:         tip
  parent:      1:7c3bad9141dc
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add new_3_c


+And that we can't push a bumped changeset
+
+  $ hg push ../tmpa
+  pushing to ../tmpa
+  searching for changes
+  abort: push includes bumped changeset: 5601fb93a350!
+  [255]
+
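That abort is the behavior this commit adds: the outgoing scan that already refused obsolete and unstable changesets (see the aborts later in this test) gains a bumped case. A hedged sketch of the gate, simplified from what the discovery code does; ctx.obsolete(), ctx.unstable(), and ctx.bumped() are real changectx methods of this vintage:

    from mercurial import util

    def checkoutgoingtroubles(repo, missing):
        # `missing` holds the nodes the remote side lacks.
        for node in missing:
            ctx = repo[node]
            if ctx.obsolete():
                raise util.Abort('push includes obsolete changeset: %s!' % ctx)
            elif ctx.unstable():
                raise util.Abort('push includes unstable changeset: %s!' % ctx)
            elif ctx.bumped():
                raise util.Abort('push includes bumped changeset: %s!' % ctx)
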
Fixing "bumped" situation
We need to create a clone of 5 and add a special marker with a flag

  $ hg up '5^'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg revert -ar 5
  adding new_3_c
  $ hg ci -m 'add n3w_3_c'
  created new head
  $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
  $ hg log -r 'bumped()'
  $ hg log -G
  @  changeset:   6:6f9641995072
  |  tag:         tip
  |  parent:      1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add n3w_3_c
  |
  | o  changeset:   2:245bde4270cd
  |/   user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     add original_c
  |
  o  changeset:   1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add b
  |
  o  changeset:   0:1f0dee641bb7
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     add a




  $ cd ..

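The --flags 1 marker is what emptied bumped() above: it records that rewriting a public changeset was deliberate. A hedged check over parsed markers; the constant's name below follows mercurial/obsolete.py of this vintage (bumpedfix == 1), which should be treated as an assumption:

    # Assumption: flag bit 1 is the bumped-fix bit (obsolete.bumpedfix).
    BUMPEDFIX = 1

    def isbumpedfix(flags):
        # True if a marker's flags field carries the bumped-fix bit,
        # e.g. isbumpedfix(parsemarkerline(line)[2]) with the parser
        # sketched earlier.
        return bool(flags & BUMPEDFIX)
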
Exchange Test
============================

Destination repo does not have any data
---------------------------------------

Try to pull markers
(extinct changesets are excluded but markers are exchanged)

  $ hg init tmpc
  $ cd tmpc
  $ hg pull ../tmpb
  pulling from ../tmpb
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 4 changesets with 4 changes to 4 files (+1 heads)
  (run 'hg heads' to see heads, 'hg merge' to merge)
  $ hg debugobsolete
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}

Rollback/transaction support

  $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
  $ hg debugobsolete
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}
  aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 {'date': '1340 0', 'user': 'test'}
  $ hg rollback -n
  repository tip rolled back to revision 3 (undo debugobsolete)
  $ hg rollback
  repository tip rolled back to revision 3 (undo debugobsolete)
  $ hg debugobsolete
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}

  $ cd ..

Try to push markers

  $ hg init tmpd
  $ hg -R tmpb push tmpd
  pushing to tmpd
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 4 changesets with 4 changes to 4 files (+1 heads)
  $ hg -R tmpd debugobsolete
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}

Check obsolete keys are exchanged only if source has an obsolete store

  $ hg init empty
  $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
  pushing to tmpd
  no changes found
  listkeys phases
  listkeys bookmarks
  [1]

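The scenario above shows the guard from the pushing side: no "listkeys obsolete" appears because the empty source has nothing to offer. A hedged sketch of that predicate; obsolete._enabled is real (it is set in this very test) and repo.obsstore is the marker store, but the helper and the truthiness shortcut are ours:

    from mercurial import obsolete

    def shouldexchangemarkers(repo):
        # Only talk about obsolescence when the feature is on and the
        # local store actually holds markers (empty stores are assumed
        # to be falsy here).
        return obsolete._enabled and bool(repo.obsstore)
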
clone support
(markers are copied and extinct changesets are included to allow hardlinks)

  $ hg clone tmpb clone-dest
  updating to branch default
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R clone-dest log -G --hidden
  @  changeset:   6:6f9641995072
  |  tag:         tip
  |  parent:      1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add n3w_3_c
  |
  | x  changeset:   5:5601fb93a350
  |/   parent:      1:7c3bad9141dc
  |    user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     add new_3_c
  |
  | x  changeset:   4:ca819180edb9
  |/   parent:      1:7c3bad9141dc
  |    user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     add new_2_c
  |
  | x  changeset:   3:cdbce2fbb163
  |/   parent:      1:7c3bad9141dc
  |    user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     add new_c
  |
  | o  changeset:   2:245bde4270cd
  |/   user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     add original_c
  |
  o  changeset:   1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add b
  |
  o  changeset:   0:1f0dee641bb7
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     add a

  $ hg -R clone-dest debugobsolete
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}


Destination repo has existing data
---------------------------------------

On pull

  $ hg init tmpe
  $ cd tmpe
  $ hg debugobsolete -d '1339 0' 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339
  $ hg pull ../tmpb
  pulling from ../tmpb
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 4 changesets with 4 changes to 4 files (+1 heads)
  (run 'hg heads' to see heads, 'hg merge' to merge)
  $ hg debugobsolete
  2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}


On push

  $ hg push ../tmpc
  pushing to ../tmpc
  searching for changes
  no changes found
  [1]
  $ hg -R ../tmpc debugobsolete
  245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
  cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
  ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
  1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
  5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}
  2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}

detect outgoing obsolete and unstable
---------------------------------------


  $ hg glog
  o  changeset:   3:6f9641995072
  |  tag:         tip
  |  parent:      1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add n3w_3_c
  |
  | o  changeset:   2:245bde4270cd
  |/   user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     add original_c
  |
  o  changeset:   1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add b
  |
  o  changeset:   0:1f0dee641bb7
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     add a

  $ hg up 'desc("n3w_3_c")'
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ mkcommit original_d
  $ mkcommit original_e
  $ hg debugobsolete `getid original_d` -d '0 0'
  $ hg log -r 'obsolete()'
  changeset:   4:94b33453f93b
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add original_d

  $ hg glog -r '::unstable()'
  @  changeset:   5:cda648ca50f5
  |  tag:         tip
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add original_e
  |
  x  changeset:   4:94b33453f93b
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add original_d
  |
  o  changeset:   3:6f9641995072
  |  parent:      1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add n3w_3_c
  |
  o  changeset:   1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add b
  |
  o  changeset:   0:1f0dee641bb7
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     add a


refuse to push obsolete changeset

  $ hg push ../tmpc/ -r 'desc("original_d")'
  pushing to ../tmpc/
  searching for changes
  abort: push includes obsolete changeset: 94b33453f93b!
  [255]

refuse to push unstable changeset

  $ hg push ../tmpc/
  pushing to ../tmpc/
  searching for changes
  abort: push includes unstable changeset: cda648ca50f5!
  [255]

Test that extinct changesets are properly detected

  $ hg log -r 'extinct()'

Don't try to push extinct changesets

  $ hg init ../tmpf
  $ hg out ../tmpf
  comparing with ../tmpf
  searching for changes
  changeset:   0:1f0dee641bb7
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add a

  changeset:   1:7c3bad9141dc
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add b

  changeset:   2:245bde4270cd
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add original_c

  changeset:   3:6f9641995072
  parent:      1:7c3bad9141dc
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add n3w_3_c

  changeset:   4:94b33453f93b
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add original_d

  changeset:   5:cda648ca50f5
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add original_e

  $ hg push ../tmpf -f # -f because we push unstable too
  pushing to ../tmpf
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 6 changesets with 6 changes to 6 files (+1 heads)

no warning displayed

  $ hg push ../tmpf
  pushing to ../tmpf
  searching for changes
  no changes found
  [1]

Do not warn about a new head when the new head is a successor of a remote one

  $ hg glog
  @  changeset:   5:cda648ca50f5
  |  tag:         tip
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add original_e
  |
  x  changeset:   4:94b33453f93b
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add original_d
  |
  o  changeset:   3:6f9641995072
  |  parent:      1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add n3w_3_c
  |
  | o  changeset:   2:245bde4270cd
  |/   user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     add original_c
  |
  o  changeset:   1:7c3bad9141dc
  |  user:        test
  |  date:        Thu Jan 01 00:00:00 1970 +0000
  |  summary:     add b
  |
  o  changeset:   0:1f0dee641bb7
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     add a

  $ hg up -q 'desc(n3w_3_c)'
  $ mkcommit obsolete_e
  created new head
  $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
  $ hg push ../tmpf
  pushing to ../tmpf
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)

Checking the _enabled=False warning if obsolete markers exist

  $ echo '[extensions]' >> $HGRCPATH
  $ echo "obs=!" >> $HGRCPATH
  $ hg log -r tip
  obsolete feature not enabled but 8 markers found!
  changeset:   6:3de5eca88c00
  tag:         tip
  parent:      3:6f9641995072
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add obsolete_e
