##// END OF EJS Templates
localrepo: filter unknown nodes from the phasecache on destroyed...
Idan Kamara -
r18221:082d6929 default
parent child Browse files
Show More
@@ -1,2574 +1,2586 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding, base85
11 import lock, transaction, store, encoding, base85
12 import scmutil, util, extensions, hook, error, revset
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
13 import match as matchmod
14 import merge as mergemod
14 import merge as mergemod
15 import tags as tagsmod
15 import tags as tagsmod
16 from lock import release
16 from lock import release
17 import weakref, errno, os, time, inspect
17 import weakref, errno, os, time, inspect
18 import branchmap
18 import branchmap
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class repofilecache(filecache):
22 class repofilecache(filecache):
23 """All filecache usage on repo are done for logic that should be unfiltered
23 """All filecache usage on repo are done for logic that should be unfiltered
24 """
24 """
25
25
26 def __get__(self, repo, type=None):
26 def __get__(self, repo, type=None):
27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 def __set__(self, repo, value):
28 def __set__(self, repo, value):
29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 def __delete__(self, repo):
30 def __delete__(self, repo):
31 return super(repofilecache, self).__delete__(repo.unfiltered())
31 return super(repofilecache, self).__delete__(repo.unfiltered())
32
32
33 class storecache(repofilecache):
33 class storecache(repofilecache):
34 """filecache for files in the store"""
34 """filecache for files in the store"""
35 def join(self, obj, fname):
35 def join(self, obj, fname):
36 return obj.sjoin(fname)
36 return obj.sjoin(fname)
37
37
38 class unfilteredpropertycache(propertycache):
38 class unfilteredpropertycache(propertycache):
39 """propertycache that apply to unfiltered repo only"""
39 """propertycache that apply to unfiltered repo only"""
40
40
41 def __get__(self, repo, type=None):
41 def __get__(self, repo, type=None):
42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43
43
44 class filteredpropertycache(propertycache):
44 class filteredpropertycache(propertycache):
45 """propertycache that must take filtering in account"""
45 """propertycache that must take filtering in account"""
46
46
47 def cachevalue(self, obj, value):
47 def cachevalue(self, obj, value):
48 object.__setattr__(obj, self.name, value)
48 object.__setattr__(obj, self.name, value)
49
49
50
50
51 def hasunfilteredcache(repo, name):
51 def hasunfilteredcache(repo, name):
52 """check if an repo and a unfilteredproperty cached value for <name>"""
52 """check if an repo and a unfilteredproperty cached value for <name>"""
53 return name in vars(repo.unfiltered())
53 return name in vars(repo.unfiltered())
54
54
55 def unfilteredmethod(orig):
55 def unfilteredmethod(orig):
56 """decorate method that always need to be run on unfiltered version"""
56 """decorate method that always need to be run on unfiltered version"""
57 def wrapper(repo, *args, **kwargs):
57 def wrapper(repo, *args, **kwargs):
58 return orig(repo.unfiltered(), *args, **kwargs)
58 return orig(repo.unfiltered(), *args, **kwargs)
59 return wrapper
59 return wrapper
60
60
61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63
63
64 class localpeer(peer.peerrepository):
64 class localpeer(peer.peerrepository):
65 '''peer for a local repo; reflects only the most recent API'''
65 '''peer for a local repo; reflects only the most recent API'''
66
66
67 def __init__(self, repo, caps=MODERNCAPS):
67 def __init__(self, repo, caps=MODERNCAPS):
68 peer.peerrepository.__init__(self)
68 peer.peerrepository.__init__(self)
69 self._repo = repo
69 self._repo = repo
70 self.ui = repo.ui
70 self.ui = repo.ui
71 self._caps = repo._restrictcapabilities(caps)
71 self._caps = repo._restrictcapabilities(caps)
72 self.requirements = repo.requirements
72 self.requirements = repo.requirements
73 self.supportedformats = repo.supportedformats
73 self.supportedformats = repo.supportedformats
74
74
75 def close(self):
75 def close(self):
76 self._repo.close()
76 self._repo.close()
77
77
78 def _capabilities(self):
78 def _capabilities(self):
79 return self._caps
79 return self._caps
80
80
81 def local(self):
81 def local(self):
82 return self._repo
82 return self._repo
83
83
84 def canpush(self):
84 def canpush(self):
85 return True
85 return True
86
86
87 def url(self):
87 def url(self):
88 return self._repo.url()
88 return self._repo.url()
89
89
90 def lookup(self, key):
90 def lookup(self, key):
91 return self._repo.lookup(key)
91 return self._repo.lookup(key)
92
92
93 def branchmap(self):
93 def branchmap(self):
94 return discovery.visiblebranchmap(self._repo)
94 return discovery.visiblebranchmap(self._repo)
95
95
96 def heads(self):
96 def heads(self):
97 return discovery.visibleheads(self._repo)
97 return discovery.visibleheads(self._repo)
98
98
99 def known(self, nodes):
99 def known(self, nodes):
100 return self._repo.known(nodes)
100 return self._repo.known(nodes)
101
101
102 def getbundle(self, source, heads=None, common=None):
102 def getbundle(self, source, heads=None, common=None):
103 return self._repo.getbundle(source, heads=heads, common=common)
103 return self._repo.getbundle(source, heads=heads, common=common)
104
104
105 # TODO We might want to move the next two calls into legacypeer and add
105 # TODO We might want to move the next two calls into legacypeer and add
106 # unbundle instead.
106 # unbundle instead.
107
107
108 def lock(self):
108 def lock(self):
109 return self._repo.lock()
109 return self._repo.lock()
110
110
111 def addchangegroup(self, cg, source, url):
111 def addchangegroup(self, cg, source, url):
112 return self._repo.addchangegroup(cg, source, url)
112 return self._repo.addchangegroup(cg, source, url)
113
113
114 def pushkey(self, namespace, key, old, new):
114 def pushkey(self, namespace, key, old, new):
115 return self._repo.pushkey(namespace, key, old, new)
115 return self._repo.pushkey(namespace, key, old, new)
116
116
117 def listkeys(self, namespace):
117 def listkeys(self, namespace):
118 return self._repo.listkeys(namespace)
118 return self._repo.listkeys(namespace)
119
119
120 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 '''used to test argument passing over the wire'''
121 '''used to test argument passing over the wire'''
122 return "%s %s %s %s %s" % (one, two, three, four, five)
122 return "%s %s %s %s %s" % (one, two, three, four, five)
123
123
124 class locallegacypeer(localpeer):
124 class locallegacypeer(localpeer):
125 '''peer extension which implements legacy methods too; used for tests with
125 '''peer extension which implements legacy methods too; used for tests with
126 restricted capabilities'''
126 restricted capabilities'''
127
127
128 def __init__(self, repo):
128 def __init__(self, repo):
129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130
130
131 def branches(self, nodes):
131 def branches(self, nodes):
132 return self._repo.branches(nodes)
132 return self._repo.branches(nodes)
133
133
134 def between(self, pairs):
134 def between(self, pairs):
135 return self._repo.between(pairs)
135 return self._repo.between(pairs)
136
136
137 def changegroup(self, basenodes, source):
137 def changegroup(self, basenodes, source):
138 return self._repo.changegroup(basenodes, source)
138 return self._repo.changegroup(basenodes, source)
139
139
140 def changegroupsubset(self, bases, heads, source):
140 def changegroupsubset(self, bases, heads, source):
141 return self._repo.changegroupsubset(bases, heads, source)
141 return self._repo.changegroupsubset(bases, heads, source)
142
142
143 class localrepository(object):
143 class localrepository(object):
144
144
145 supportedformats = set(('revlogv1', 'generaldelta'))
145 supportedformats = set(('revlogv1', 'generaldelta'))
146 supported = supportedformats | set(('store', 'fncache', 'shared',
146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 'dotencode'))
147 'dotencode'))
148 openerreqs = set(('revlogv1', 'generaldelta'))
148 openerreqs = set(('revlogv1', 'generaldelta'))
149 requirements = ['revlogv1']
149 requirements = ['revlogv1']
150 filtername = None
150 filtername = None
151
151
152 def _baserequirements(self, create):
152 def _baserequirements(self, create):
153 return self.requirements[:]
153 return self.requirements[:]
154
154
155 def __init__(self, baseui, path=None, create=False):
155 def __init__(self, baseui, path=None, create=False):
156 self.wvfs = scmutil.vfs(path, expand=True)
156 self.wvfs = scmutil.vfs(path, expand=True)
157 self.wopener = self.wvfs
157 self.wopener = self.wvfs
158 self.root = self.wvfs.base
158 self.root = self.wvfs.base
159 self.path = self.wvfs.join(".hg")
159 self.path = self.wvfs.join(".hg")
160 self.origroot = path
160 self.origroot = path
161 self.auditor = scmutil.pathauditor(self.root, self._checknested)
161 self.auditor = scmutil.pathauditor(self.root, self._checknested)
162 self.vfs = scmutil.vfs(self.path)
162 self.vfs = scmutil.vfs(self.path)
163 self.opener = self.vfs
163 self.opener = self.vfs
164 self.baseui = baseui
164 self.baseui = baseui
165 self.ui = baseui.copy()
165 self.ui = baseui.copy()
166 # A list of callback to shape the phase if no data were found.
166 # A list of callback to shape the phase if no data were found.
167 # Callback are in the form: func(repo, roots) --> processed root.
167 # Callback are in the form: func(repo, roots) --> processed root.
168 # This list it to be filled by extension during repo setup
168 # This list it to be filled by extension during repo setup
169 self._phasedefaults = []
169 self._phasedefaults = []
170 try:
170 try:
171 self.ui.readconfig(self.join("hgrc"), self.root)
171 self.ui.readconfig(self.join("hgrc"), self.root)
172 extensions.loadall(self.ui)
172 extensions.loadall(self.ui)
173 except IOError:
173 except IOError:
174 pass
174 pass
175
175
176 if not self.vfs.isdir():
176 if not self.vfs.isdir():
177 if create:
177 if create:
178 if not self.wvfs.exists():
178 if not self.wvfs.exists():
179 self.wvfs.makedirs()
179 self.wvfs.makedirs()
180 self.vfs.makedir(notindexed=True)
180 self.vfs.makedir(notindexed=True)
181 requirements = self._baserequirements(create)
181 requirements = self._baserequirements(create)
182 if self.ui.configbool('format', 'usestore', True):
182 if self.ui.configbool('format', 'usestore', True):
183 self.vfs.mkdir("store")
183 self.vfs.mkdir("store")
184 requirements.append("store")
184 requirements.append("store")
185 if self.ui.configbool('format', 'usefncache', True):
185 if self.ui.configbool('format', 'usefncache', True):
186 requirements.append("fncache")
186 requirements.append("fncache")
187 if self.ui.configbool('format', 'dotencode', True):
187 if self.ui.configbool('format', 'dotencode', True):
188 requirements.append('dotencode')
188 requirements.append('dotencode')
189 # create an invalid changelog
189 # create an invalid changelog
190 self.vfs.append(
190 self.vfs.append(
191 "00changelog.i",
191 "00changelog.i",
192 '\0\0\0\2' # represents revlogv2
192 '\0\0\0\2' # represents revlogv2
193 ' dummy changelog to prevent using the old repo layout'
193 ' dummy changelog to prevent using the old repo layout'
194 )
194 )
195 if self.ui.configbool('format', 'generaldelta', False):
195 if self.ui.configbool('format', 'generaldelta', False):
196 requirements.append("generaldelta")
196 requirements.append("generaldelta")
197 requirements = set(requirements)
197 requirements = set(requirements)
198 else:
198 else:
199 raise error.RepoError(_("repository %s not found") % path)
199 raise error.RepoError(_("repository %s not found") % path)
200 elif create:
200 elif create:
201 raise error.RepoError(_("repository %s already exists") % path)
201 raise error.RepoError(_("repository %s already exists") % path)
202 else:
202 else:
203 try:
203 try:
204 requirements = scmutil.readrequires(self.vfs, self.supported)
204 requirements = scmutil.readrequires(self.vfs, self.supported)
205 except IOError, inst:
205 except IOError, inst:
206 if inst.errno != errno.ENOENT:
206 if inst.errno != errno.ENOENT:
207 raise
207 raise
208 requirements = set()
208 requirements = set()
209
209
210 self.sharedpath = self.path
210 self.sharedpath = self.path
211 try:
211 try:
212 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
212 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
213 if not os.path.exists(s):
213 if not os.path.exists(s):
214 raise error.RepoError(
214 raise error.RepoError(
215 _('.hg/sharedpath points to nonexistent directory %s') % s)
215 _('.hg/sharedpath points to nonexistent directory %s') % s)
216 self.sharedpath = s
216 self.sharedpath = s
217 except IOError, inst:
217 except IOError, inst:
218 if inst.errno != errno.ENOENT:
218 if inst.errno != errno.ENOENT:
219 raise
219 raise
220
220
221 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
221 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
222 self.spath = self.store.path
222 self.spath = self.store.path
223 self.svfs = self.store.vfs
223 self.svfs = self.store.vfs
224 self.sopener = self.svfs
224 self.sopener = self.svfs
225 self.sjoin = self.store.join
225 self.sjoin = self.store.join
226 self.vfs.createmode = self.store.createmode
226 self.vfs.createmode = self.store.createmode
227 self._applyrequirements(requirements)
227 self._applyrequirements(requirements)
228 if create:
228 if create:
229 self._writerequirements()
229 self._writerequirements()
230
230
231
231
232 self._branchcaches = {}
232 self._branchcaches = {}
233 self.filterpats = {}
233 self.filterpats = {}
234 self._datafilters = {}
234 self._datafilters = {}
235 self._transref = self._lockref = self._wlockref = None
235 self._transref = self._lockref = self._wlockref = None
236
236
237 # A cache for various files under .hg/ that tracks file changes,
237 # A cache for various files under .hg/ that tracks file changes,
238 # (used by the filecache decorator)
238 # (used by the filecache decorator)
239 #
239 #
240 # Maps a property name to its util.filecacheentry
240 # Maps a property name to its util.filecacheentry
241 self._filecache = {}
241 self._filecache = {}
242
242
243 # hold sets of revision to be filtered
243 # hold sets of revision to be filtered
244 # should be cleared when something might have changed the filter value:
244 # should be cleared when something might have changed the filter value:
245 # - new changesets,
245 # - new changesets,
246 # - phase change,
246 # - phase change,
247 # - new obsolescence marker,
247 # - new obsolescence marker,
248 # - working directory parent change,
248 # - working directory parent change,
249 # - bookmark changes
249 # - bookmark changes
250 self.filteredrevcache = {}
250 self.filteredrevcache = {}
251
251
252 def close(self):
252 def close(self):
253 pass
253 pass
254
254
255 def _restrictcapabilities(self, caps):
255 def _restrictcapabilities(self, caps):
256 return caps
256 return caps
257
257
258 def _applyrequirements(self, requirements):
258 def _applyrequirements(self, requirements):
259 self.requirements = requirements
259 self.requirements = requirements
260 self.sopener.options = dict((r, 1) for r in requirements
260 self.sopener.options = dict((r, 1) for r in requirements
261 if r in self.openerreqs)
261 if r in self.openerreqs)
262
262
263 def _writerequirements(self):
263 def _writerequirements(self):
264 reqfile = self.opener("requires", "w")
264 reqfile = self.opener("requires", "w")
265 for r in self.requirements:
265 for r in self.requirements:
266 reqfile.write("%s\n" % r)
266 reqfile.write("%s\n" % r)
267 reqfile.close()
267 reqfile.close()
268
268
269 def _checknested(self, path):
269 def _checknested(self, path):
270 """Determine if path is a legal nested repository."""
270 """Determine if path is a legal nested repository."""
271 if not path.startswith(self.root):
271 if not path.startswith(self.root):
272 return False
272 return False
273 subpath = path[len(self.root) + 1:]
273 subpath = path[len(self.root) + 1:]
274 normsubpath = util.pconvert(subpath)
274 normsubpath = util.pconvert(subpath)
275
275
276 # XXX: Checking against the current working copy is wrong in
276 # XXX: Checking against the current working copy is wrong in
277 # the sense that it can reject things like
277 # the sense that it can reject things like
278 #
278 #
279 # $ hg cat -r 10 sub/x.txt
279 # $ hg cat -r 10 sub/x.txt
280 #
280 #
281 # if sub/ is no longer a subrepository in the working copy
281 # if sub/ is no longer a subrepository in the working copy
282 # parent revision.
282 # parent revision.
283 #
283 #
284 # However, it can of course also allow things that would have
284 # However, it can of course also allow things that would have
285 # been rejected before, such as the above cat command if sub/
285 # been rejected before, such as the above cat command if sub/
286 # is a subrepository now, but was a normal directory before.
286 # is a subrepository now, but was a normal directory before.
287 # The old path auditor would have rejected by mistake since it
287 # The old path auditor would have rejected by mistake since it
288 # panics when it sees sub/.hg/.
288 # panics when it sees sub/.hg/.
289 #
289 #
290 # All in all, checking against the working copy seems sensible
290 # All in all, checking against the working copy seems sensible
291 # since we want to prevent access to nested repositories on
291 # since we want to prevent access to nested repositories on
292 # the filesystem *now*.
292 # the filesystem *now*.
293 ctx = self[None]
293 ctx = self[None]
294 parts = util.splitpath(subpath)
294 parts = util.splitpath(subpath)
295 while parts:
295 while parts:
296 prefix = '/'.join(parts)
296 prefix = '/'.join(parts)
297 if prefix in ctx.substate:
297 if prefix in ctx.substate:
298 if prefix == normsubpath:
298 if prefix == normsubpath:
299 return True
299 return True
300 else:
300 else:
301 sub = ctx.sub(prefix)
301 sub = ctx.sub(prefix)
302 return sub.checknested(subpath[len(prefix) + 1:])
302 return sub.checknested(subpath[len(prefix) + 1:])
303 else:
303 else:
304 parts.pop()
304 parts.pop()
305 return False
305 return False
306
306
307 def peer(self):
307 def peer(self):
308 return localpeer(self) # not cached to avoid reference cycle
308 return localpeer(self) # not cached to avoid reference cycle
309
309
310 def unfiltered(self):
310 def unfiltered(self):
311 """Return unfiltered version of the repository
311 """Return unfiltered version of the repository
312
312
313 Intended to be ovewritten by filtered repo."""
313 Intended to be ovewritten by filtered repo."""
314 return self
314 return self
315
315
316 def filtered(self, name):
316 def filtered(self, name):
317 """Return a filtered version of a repository"""
317 """Return a filtered version of a repository"""
318 # build a new class with the mixin and the current class
318 # build a new class with the mixin and the current class
319 # (possibily subclass of the repo)
319 # (possibily subclass of the repo)
320 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 class proxycls(repoview.repoview, self.unfiltered().__class__):
321 pass
321 pass
322 return proxycls(self, name)
322 return proxycls(self, name)
323
323
324 @repofilecache('bookmarks')
324 @repofilecache('bookmarks')
325 def _bookmarks(self):
325 def _bookmarks(self):
326 return bookmarks.bmstore(self)
326 return bookmarks.bmstore(self)
327
327
328 @repofilecache('bookmarks.current')
328 @repofilecache('bookmarks.current')
329 def _bookmarkcurrent(self):
329 def _bookmarkcurrent(self):
330 return bookmarks.readcurrent(self)
330 return bookmarks.readcurrent(self)
331
331
332 def bookmarkheads(self, bookmark):
332 def bookmarkheads(self, bookmark):
333 name = bookmark.split('@', 1)[0]
333 name = bookmark.split('@', 1)[0]
334 heads = []
334 heads = []
335 for mark, n in self._bookmarks.iteritems():
335 for mark, n in self._bookmarks.iteritems():
336 if mark.split('@', 1)[0] == name:
336 if mark.split('@', 1)[0] == name:
337 heads.append(n)
337 heads.append(n)
338 return heads
338 return heads
339
339
340 @storecache('phaseroots')
340 @storecache('phaseroots')
341 def _phasecache(self):
341 def _phasecache(self):
342 return phases.phasecache(self, self._phasedefaults)
342 return phases.phasecache(self, self._phasedefaults)
343
343
344 @storecache('obsstore')
344 @storecache('obsstore')
345 def obsstore(self):
345 def obsstore(self):
346 store = obsolete.obsstore(self.sopener)
346 store = obsolete.obsstore(self.sopener)
347 if store and not obsolete._enabled:
347 if store and not obsolete._enabled:
348 # message is rare enough to not be translated
348 # message is rare enough to not be translated
349 msg = 'obsolete feature not enabled but %i markers found!\n'
349 msg = 'obsolete feature not enabled but %i markers found!\n'
350 self.ui.warn(msg % len(list(store)))
350 self.ui.warn(msg % len(list(store)))
351 return store
351 return store
352
352
353 @unfilteredpropertycache
353 @unfilteredpropertycache
354 def hiddenrevs(self):
354 def hiddenrevs(self):
355 """hiddenrevs: revs that should be hidden by command and tools
355 """hiddenrevs: revs that should be hidden by command and tools
356
356
357 This set is carried on the repo to ease initialization and lazy
357 This set is carried on the repo to ease initialization and lazy
358 loading; it'll probably move back to changelog for efficiency and
358 loading; it'll probably move back to changelog for efficiency and
359 consistency reasons.
359 consistency reasons.
360
360
361 Note that the hiddenrevs will needs invalidations when
361 Note that the hiddenrevs will needs invalidations when
362 - a new changesets is added (possible unstable above extinct)
362 - a new changesets is added (possible unstable above extinct)
363 - a new obsolete marker is added (possible new extinct changeset)
363 - a new obsolete marker is added (possible new extinct changeset)
364
364
365 hidden changesets cannot have non-hidden descendants
365 hidden changesets cannot have non-hidden descendants
366 """
366 """
367 hidden = set()
367 hidden = set()
368 if self.obsstore:
368 if self.obsstore:
369 ### hide extinct changeset that are not accessible by any mean
369 ### hide extinct changeset that are not accessible by any mean
370 hiddenquery = 'extinct() - ::(. + bookmark())'
370 hiddenquery = 'extinct() - ::(. + bookmark())'
371 hidden.update(self.revs(hiddenquery))
371 hidden.update(self.revs(hiddenquery))
372 return hidden
372 return hidden
373
373
374 @storecache('00changelog.i')
374 @storecache('00changelog.i')
375 def changelog(self):
375 def changelog(self):
376 c = changelog.changelog(self.sopener)
376 c = changelog.changelog(self.sopener)
377 if 'HG_PENDING' in os.environ:
377 if 'HG_PENDING' in os.environ:
378 p = os.environ['HG_PENDING']
378 p = os.environ['HG_PENDING']
379 if p.startswith(self.root):
379 if p.startswith(self.root):
380 c.readpending('00changelog.i.a')
380 c.readpending('00changelog.i.a')
381 return c
381 return c
382
382
383 @storecache('00manifest.i')
383 @storecache('00manifest.i')
384 def manifest(self):
384 def manifest(self):
385 return manifest.manifest(self.sopener)
385 return manifest.manifest(self.sopener)
386
386
387 @repofilecache('dirstate')
387 @repofilecache('dirstate')
388 def dirstate(self):
388 def dirstate(self):
389 warned = [0]
389 warned = [0]
390 def validate(node):
390 def validate(node):
391 try:
391 try:
392 self.changelog.rev(node)
392 self.changelog.rev(node)
393 return node
393 return node
394 except error.LookupError:
394 except error.LookupError:
395 if not warned[0]:
395 if not warned[0]:
396 warned[0] = True
396 warned[0] = True
397 self.ui.warn(_("warning: ignoring unknown"
397 self.ui.warn(_("warning: ignoring unknown"
398 " working parent %s!\n") % short(node))
398 " working parent %s!\n") % short(node))
399 return nullid
399 return nullid
400
400
401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
402
402
403 def __getitem__(self, changeid):
403 def __getitem__(self, changeid):
404 if changeid is None:
404 if changeid is None:
405 return context.workingctx(self)
405 return context.workingctx(self)
406 return context.changectx(self, changeid)
406 return context.changectx(self, changeid)
407
407
408 def __contains__(self, changeid):
408 def __contains__(self, changeid):
409 try:
409 try:
410 return bool(self.lookup(changeid))
410 return bool(self.lookup(changeid))
411 except error.RepoLookupError:
411 except error.RepoLookupError:
412 return False
412 return False
413
413
414 def __nonzero__(self):
414 def __nonzero__(self):
415 return True
415 return True
416
416
417 def __len__(self):
417 def __len__(self):
418 return len(self.changelog)
418 return len(self.changelog)
419
419
420 def __iter__(self):
420 def __iter__(self):
421 return iter(self.changelog)
421 return iter(self.changelog)
422
422
423 def revs(self, expr, *args):
423 def revs(self, expr, *args):
424 '''Return a list of revisions matching the given revset'''
424 '''Return a list of revisions matching the given revset'''
425 expr = revset.formatspec(expr, *args)
425 expr = revset.formatspec(expr, *args)
426 m = revset.match(None, expr)
426 m = revset.match(None, expr)
427 return [r for r in m(self, list(self))]
427 return [r for r in m(self, list(self))]
428
428
429 def set(self, expr, *args):
429 def set(self, expr, *args):
430 '''
430 '''
431 Yield a context for each matching revision, after doing arg
431 Yield a context for each matching revision, after doing arg
432 replacement via revset.formatspec
432 replacement via revset.formatspec
433 '''
433 '''
434 for r in self.revs(expr, *args):
434 for r in self.revs(expr, *args):
435 yield self[r]
435 yield self[r]
436
436
437 def url(self):
437 def url(self):
438 return 'file:' + self.root
438 return 'file:' + self.root
439
439
440 def hook(self, name, throw=False, **args):
440 def hook(self, name, throw=False, **args):
441 return hook.hook(self.ui, self, name, throw, **args)
441 return hook.hook(self.ui, self, name, throw, **args)
442
442
443 @unfilteredmethod
443 @unfilteredmethod
444 def _tag(self, names, node, message, local, user, date, extra={}):
444 def _tag(self, names, node, message, local, user, date, extra={}):
445 if isinstance(names, str):
445 if isinstance(names, str):
446 names = (names,)
446 names = (names,)
447
447
448 branches = self.branchmap()
448 branches = self.branchmap()
449 for name in names:
449 for name in names:
450 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 self.hook('pretag', throw=True, node=hex(node), tag=name,
451 local=local)
451 local=local)
452 if name in branches:
452 if name in branches:
453 self.ui.warn(_("warning: tag %s conflicts with existing"
453 self.ui.warn(_("warning: tag %s conflicts with existing"
454 " branch name\n") % name)
454 " branch name\n") % name)
455
455
456 def writetags(fp, names, munge, prevtags):
456 def writetags(fp, names, munge, prevtags):
457 fp.seek(0, 2)
457 fp.seek(0, 2)
458 if prevtags and prevtags[-1] != '\n':
458 if prevtags and prevtags[-1] != '\n':
459 fp.write('\n')
459 fp.write('\n')
460 for name in names:
460 for name in names:
461 m = munge and munge(name) or name
461 m = munge and munge(name) or name
462 if (self._tagscache.tagtypes and
462 if (self._tagscache.tagtypes and
463 name in self._tagscache.tagtypes):
463 name in self._tagscache.tagtypes):
464 old = self.tags().get(name, nullid)
464 old = self.tags().get(name, nullid)
465 fp.write('%s %s\n' % (hex(old), m))
465 fp.write('%s %s\n' % (hex(old), m))
466 fp.write('%s %s\n' % (hex(node), m))
466 fp.write('%s %s\n' % (hex(node), m))
467 fp.close()
467 fp.close()
468
468
469 prevtags = ''
469 prevtags = ''
470 if local:
470 if local:
471 try:
471 try:
472 fp = self.opener('localtags', 'r+')
472 fp = self.opener('localtags', 'r+')
473 except IOError:
473 except IOError:
474 fp = self.opener('localtags', 'a')
474 fp = self.opener('localtags', 'a')
475 else:
475 else:
476 prevtags = fp.read()
476 prevtags = fp.read()
477
477
478 # local tags are stored in the current charset
478 # local tags are stored in the current charset
479 writetags(fp, names, None, prevtags)
479 writetags(fp, names, None, prevtags)
480 for name in names:
480 for name in names:
481 self.hook('tag', node=hex(node), tag=name, local=local)
481 self.hook('tag', node=hex(node), tag=name, local=local)
482 return
482 return
483
483
484 try:
484 try:
485 fp = self.wfile('.hgtags', 'rb+')
485 fp = self.wfile('.hgtags', 'rb+')
486 except IOError, e:
486 except IOError, e:
487 if e.errno != errno.ENOENT:
487 if e.errno != errno.ENOENT:
488 raise
488 raise
489 fp = self.wfile('.hgtags', 'ab')
489 fp = self.wfile('.hgtags', 'ab')
490 else:
490 else:
491 prevtags = fp.read()
491 prevtags = fp.read()
492
492
493 # committed tags are stored in UTF-8
493 # committed tags are stored in UTF-8
494 writetags(fp, names, encoding.fromlocal, prevtags)
494 writetags(fp, names, encoding.fromlocal, prevtags)
495
495
496 fp.close()
496 fp.close()
497
497
498 self.invalidatecaches()
498 self.invalidatecaches()
499
499
500 if '.hgtags' not in self.dirstate:
500 if '.hgtags' not in self.dirstate:
501 self[None].add(['.hgtags'])
501 self[None].add(['.hgtags'])
502
502
503 m = matchmod.exact(self.root, '', ['.hgtags'])
503 m = matchmod.exact(self.root, '', ['.hgtags'])
504 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 tagnode = self.commit(message, user, date, extra=extra, match=m)
505
505
506 for name in names:
506 for name in names:
507 self.hook('tag', node=hex(node), tag=name, local=local)
507 self.hook('tag', node=hex(node), tag=name, local=local)
508
508
509 return tagnode
509 return tagnode
510
510
511 def tag(self, names, node, message, local, user, date):
511 def tag(self, names, node, message, local, user, date):
512 '''tag a revision with one or more symbolic names.
512 '''tag a revision with one or more symbolic names.
513
513
514 names is a list of strings or, when adding a single tag, names may be a
514 names is a list of strings or, when adding a single tag, names may be a
515 string.
515 string.
516
516
517 if local is True, the tags are stored in a per-repository file.
517 if local is True, the tags are stored in a per-repository file.
518 otherwise, they are stored in the .hgtags file, and a new
518 otherwise, they are stored in the .hgtags file, and a new
519 changeset is committed with the change.
519 changeset is committed with the change.
520
520
521 keyword arguments:
521 keyword arguments:
522
522
523 local: whether to store tags in non-version-controlled file
523 local: whether to store tags in non-version-controlled file
524 (default False)
524 (default False)
525
525
526 message: commit message to use if committing
526 message: commit message to use if committing
527
527
528 user: name of user to use if committing
528 user: name of user to use if committing
529
529
530 date: date tuple to use if committing'''
530 date: date tuple to use if committing'''
531
531
532 if not local:
532 if not local:
533 for x in self.status()[:5]:
533 for x in self.status()[:5]:
534 if '.hgtags' in x:
534 if '.hgtags' in x:
535 raise util.Abort(_('working copy of .hgtags is changed '
535 raise util.Abort(_('working copy of .hgtags is changed '
536 '(please commit .hgtags manually)'))
536 '(please commit .hgtags manually)'))
537
537
538 self.tags() # instantiate the cache
538 self.tags() # instantiate the cache
539 self._tag(names, node, message, local, user, date)
539 self._tag(names, node, message, local, user, date)
540
540
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            # Plain attribute holder; the lazily-filled members below are
            # populated by tags()/tagslist()/nodetags() on first use.
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, computed lazily (see nodetags/tagslist)
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        # only tags/tagtypes are filled eagerly; the rest stay None
        cache.tags, cache.tagtypes = self._findtags()

        return cache
563
563
564 def tags(self):
564 def tags(self):
565 '''return a mapping of tag to node'''
565 '''return a mapping of tag to node'''
566 t = {}
566 t = {}
567 if self.changelog.filteredrevs:
567 if self.changelog.filteredrevs:
568 tags, tt = self._findtags()
568 tags, tt = self._findtags()
569 else:
569 else:
570 tags = self._tagscache.tags
570 tags = self._tagscache.tags
571 for k, v in tags.iteritems():
571 for k, v in tags.iteritems():
572 try:
572 try:
573 # ignore tags to unknown nodes
573 # ignore tags to unknown nodes
574 self.changelog.rev(v)
574 self.changelog.rev(v)
575 t[k] = v
575 t[k] = v
576 except (error.LookupError, ValueError):
576 except (error.LookupError, ValueError):
577 pass
577 pass
578 return t
578 return t
579
579
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        # .hgtags across heads first, then .hg/localtags (local overrides)
        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            # a nullid node means the tag was deleted; drop it
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' is implicit and always present
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
612
612
613 def tagtype(self, tagname):
613 def tagtype(self, tagname):
614 '''
614 '''
615 return the type of the given tag. result can be:
615 return the type of the given tag. result can be:
616
616
617 'local' : a local tag
617 'local' : a local tag
618 'global' : a global tag
618 'global' : a global tag
619 None : tag does not exist
619 None : tag does not exist
620 '''
620 '''
621
621
622 return self._tagscache.tagtypes.get(tagname)
622 return self._tagscache.tagtypes.get(tagname)
623
623
624 def tagslist(self):
624 def tagslist(self):
625 '''return a list of tags ordered by revision'''
625 '''return a list of tags ordered by revision'''
626 if not self._tagscache.tagslist:
626 if not self._tagscache.tagslist:
627 l = []
627 l = []
628 for t, n in self.tags().iteritems():
628 for t, n in self.tags().iteritems():
629 r = self.changelog.rev(n)
629 r = self.changelog.rev(n)
630 l.append((r, t, n))
630 l.append((r, t, n))
631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
632
632
633 return self._tagscache.tagslist
633 return self._tagscache.tagslist
634
634
635 def nodetags(self, node):
635 def nodetags(self, node):
636 '''return the tags associated with a node'''
636 '''return the tags associated with a node'''
637 if not self._tagscache.nodetagscache:
637 if not self._tagscache.nodetagscache:
638 nodetagscache = {}
638 nodetagscache = {}
639 for t, n in self._tagscache.tags.iteritems():
639 for t, n in self._tagscache.tags.iteritems():
640 nodetagscache.setdefault(n, []).append(t)
640 nodetagscache.setdefault(n, []).append(t)
641 for tags in nodetagscache.itervalues():
641 for tags in nodetagscache.itervalues():
642 tags.sort()
642 tags.sort()
643 self._tagscache.nodetagscache = nodetagscache
643 self._tagscache.nodetagscache = nodetagscache
644 return self._tagscache.nodetagscache.get(node, [])
644 return self._tagscache.nodetagscache.get(node, [])
645
645
646 def nodebookmarks(self, node):
646 def nodebookmarks(self, node):
647 marks = []
647 marks = []
648 for bookmark, n in self._bookmarks.iteritems():
648 for bookmark, n in self._bookmarks.iteritems():
649 if n == node:
649 if n == node:
650 marks.append(bookmark)
650 marks.append(bookmark)
651 return sorted(marks)
651 return sorted(marks)
652
652
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # a filtered view whose filtered revs have not been computed yet
        # answers through the unfiltered repo to avoid caching a map built
        # from an incomplete filter
        if self.filtername and not self.changelog.filteredrevs:
            return self.unfiltered().branchmap()
        # make sure the per-filter cache is up to date, then serve from it
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
659
659
660
660
661 def _branchtip(self, heads):
661 def _branchtip(self, heads):
662 '''return the tipmost branch head in heads'''
662 '''return the tipmost branch head in heads'''
663 tip = heads[-1]
663 tip = heads[-1]
664 for h in reversed(heads):
664 for h in reversed(heads):
665 if not self[h].closesbranch():
665 if not self[h].closesbranch():
666 tip = h
666 tip = h
667 break
667 break
668 return tip
668 return tip
669
669
670 def branchtip(self, branch):
670 def branchtip(self, branch):
671 '''return the tip node for a given branch'''
671 '''return the tip node for a given branch'''
672 if branch not in self.branchmap():
672 if branch not in self.branchmap():
673 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
673 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
674 return self._branchtip(self.branchmap()[branch])
674 return self._branchtip(self.branchmap()[branch])
675
675
676 def branchtags(self):
676 def branchtags(self):
677 '''return a dict where branch names map to the tipmost head of
677 '''return a dict where branch names map to the tipmost head of
678 the branch, open heads come before closed'''
678 the branch, open heads come before closed'''
679 bt = {}
679 bt = {}
680 for bn, heads in self.branchmap().iteritems():
680 for bn, heads in self.branchmap().iteritems():
681 bt[bn] = self._branchtip(heads)
681 bt[bn] = self._branchtip(heads)
682 return bt
682 return bt
683
683
684 def lookup(self, key):
684 def lookup(self, key):
685 return self[key].node()
685 return self[key].node()
686
686
687 def lookupbranch(self, key, remote=None):
687 def lookupbranch(self, key, remote=None):
688 repo = remote or self
688 repo = remote or self
689 if key in repo.branchmap():
689 if key in repo.branchmap():
690 return key
690 return key
691
691
692 repo = (remote and remote.local()) and remote or self
692 repo = (remote and remote.local()) and remote or self
693 return repo[key].branch()
693 return repo[key].branch()
694
694
695 def known(self, nodes):
695 def known(self, nodes):
696 nm = self.changelog.nodemap
696 nm = self.changelog.nodemap
697 pc = self._phasecache
697 pc = self._phasecache
698 result = []
698 result = []
699 for n in nodes:
699 for n in nodes:
700 r = nm.get(n)
700 r = nm.get(n)
701 resp = not (r is None or pc.phase(self, r) >= phases.secret)
701 resp = not (r is None or pc.phase(self, r) >= phases.secret)
702 result.append(resp)
702 result.append(resp)
703 return result
703 return result
704
704
    def local(self):
        # a localrepo answers itself; remote peer classes return None here
        return self
707
707
    def cancopy(self):
        # hardlink-clone is possible exactly when the repo is truly local
        return self.local() # so statichttprepo's override of local() works
710
710
711 def join(self, f):
711 def join(self, f):
712 return os.path.join(self.path, f)
712 return os.path.join(self.path, f)
713
713
714 def wjoin(self, f):
714 def wjoin(self, f):
715 return os.path.join(self.root, f)
715 return os.path.join(self.root, f)
716
716
717 def file(self, f):
717 def file(self, f):
718 if f[0] == '/':
718 if f[0] == '/':
719 f = f[1:]
719 f = f[1:]
720 return filelog.filelog(self.sopener, f)
720 return filelog.filelog(self.sopener, f)
721
721
    def changectx(self, changeid):
        # thin alias for repo[changeid]
        return self[changeid]
724
724
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        # changeid=None means the working directory context
        return self[changeid].parents()
728
728
729 def setparents(self, p1, p2=nullid):
729 def setparents(self, p1, p2=nullid):
730 copies = self.dirstate.setparents(p1, p2)
730 copies = self.dirstate.setparents(p1, p2)
731 if copies:
731 if copies:
732 # Adjust copy records, the dirstate cannot do it, it
732 # Adjust copy records, the dirstate cannot do it, it
733 # requires access to parents manifests. Preserve them
733 # requires access to parents manifests. Preserve them
734 # only for entries added to first parent.
734 # only for entries added to first parent.
735 pctx = self[p1]
735 pctx = self[p1]
736 for f in copies:
736 for f in copies:
737 if f not in pctx and copies[f] in pctx:
737 if f not in pctx and copies[f] in pctx:
738 self.dirstate.copy(copies[f], f)
738 self.dirstate.copy(copies[f], f)
739
739
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
744
744
    def getcwd(self):
        # current working directory, relative to the repo root (delegated
        # to the dirstate)
        return self.dirstate.getcwd()
747
747
    def pathto(self, f, cwd=None):
        # repo-relative path f rendered relative to cwd (delegated to the
        # dirstate)
        return self.dirstate.pathto(f, cwd)
750
750
    def wfile(self, f, mode='r'):
        # open file f from the working directory with the given mode
        return self.wopener(f, mode)
753
753
754 def _link(self, f):
754 def _link(self, f):
755 return os.path.islink(self.wjoin(f))
755 return os.path.islink(self.wjoin(f))
756
756
757 def _loadfilter(self, filter):
757 def _loadfilter(self, filter):
758 if filter not in self.filterpats:
758 if filter not in self.filterpats:
759 l = []
759 l = []
760 for pat, cmd in self.ui.configitems(filter):
760 for pat, cmd in self.ui.configitems(filter):
761 if cmd == '!':
761 if cmd == '!':
762 continue
762 continue
763 mf = matchmod.match(self.root, '', [pat])
763 mf = matchmod.match(self.root, '', [pat])
764 fn = None
764 fn = None
765 params = cmd
765 params = cmd
766 for name, filterfn in self._datafilters.iteritems():
766 for name, filterfn in self._datafilters.iteritems():
767 if cmd.startswith(name):
767 if cmd.startswith(name):
768 fn = filterfn
768 fn = filterfn
769 params = cmd[len(name):].lstrip()
769 params = cmd[len(name):].lstrip()
770 break
770 break
771 if not fn:
771 if not fn:
772 fn = lambda s, c, **kwargs: util.filter(s, c)
772 fn = lambda s, c, **kwargs: util.filter(s, c)
773 # Wrap old filters not supporting keyword arguments
773 # Wrap old filters not supporting keyword arguments
774 if not inspect.getargspec(fn)[2]:
774 if not inspect.getargspec(fn)[2]:
775 oldfn = fn
775 oldfn = fn
776 fn = lambda s, c, **kwargs: oldfn(s, c)
776 fn = lambda s, c, **kwargs: oldfn(s, c)
777 l.append((mf, fn, params))
777 l.append((mf, fn, params))
778 self.filterpats[filter] = l
778 self.filterpats[filter] = l
779 return self.filterpats[filter]
779 return self.filterpats[filter]
780
780
781 def _filter(self, filterpats, filename, data):
781 def _filter(self, filterpats, filename, data):
782 for mf, fn, cmd in filterpats:
782 for mf, fn, cmd in filterpats:
783 if mf(filename):
783 if mf(filename):
784 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
784 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
785 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
785 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
786 break
786 break
787
787
788 return data
788 return data
789
789
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached [encode] filter patterns, applied by wread()
        return self._loadfilter('encode')
793
793
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached [decode] filter patterns, applied by wwrite()/wwritedata()
        return self._loadfilter('decode')
797
797
    def adddatafilter(self, name, filter):
        # register a named data filter usable from [encode]/[decode]
        # config entries (see _loadfilter)
        self._datafilters[name] = filter
800
800
801 def wread(self, filename):
801 def wread(self, filename):
802 if self._link(filename):
802 if self._link(filename):
803 data = os.readlink(self.wjoin(filename))
803 data = os.readlink(self.wjoin(filename))
804 else:
804 else:
805 data = self.wopener.read(filename)
805 data = self.wopener.read(filename)
806 return self._filter(self._encodefilterpats, filename, data)
806 return self._filter(self._encodefilterpats, filename, data)
807
807
808 def wwrite(self, filename, data, flags):
808 def wwrite(self, filename, data, flags):
809 data = self._filter(self._decodefilterpats, filename, data)
809 data = self._filter(self._decodefilterpats, filename, data)
810 if 'l' in flags:
810 if 'l' in flags:
811 self.wopener.symlink(data, filename)
811 self.wopener.symlink(data, filename)
812 else:
812 else:
813 self.wopener.write(filename, data)
813 self.wopener.write(filename, data)
814 if 'x' in flags:
814 if 'x' in flags:
815 util.setflags(self.wjoin(filename), False, True)
815 util.setflags(self.wjoin(filename), False, True)
816
816
    def wwritedata(self, filename, data):
        # like wwrite, but only run the 'decode' filters and return the
        # result instead of touching the working directory
        return self._filter(self._decodefilterpats, filename, data)
819
819
    def transaction(self, desc):
        # Return a (possibly nested) transaction for this repository.
        # Reuses the running transaction if one exists.
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot dirstate/branch/bookmarks/phaseroots first, then arrange
        # for the journal files to be renamed to undo.* after the
        # transaction closes (aftertrans)
        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # keep only a weakref so an abandoned transaction gets aborted on gc
        self._transref = weakref.ref(tr)
        return tr
839
839
    def _journalfiles(self):
        # every file written by _writejournal plus the journal itself;
        # undofiles() derives the matching undo.* names from this list
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
845
845
846 def undofiles(self):
846 def undofiles(self):
847 return [undoname(x) for x in self._journalfiles()]
847 return [undoname(x) for x in self._journalfiles()]
848
848
    def _writejournal(self, desc):
        # Snapshot the state a rollback needs to restore: dirstate, current
        # branch, old length + description, bookmarks and phase roots.
        # tryread() is used so missing files simply produce empty snapshots.
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # first line: repo length before the transaction; second: desc
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
860
860
    def recover(self):
        # Roll back an interrupted transaction (leftover journal file).
        # Returns True if a journal was found and rolled back, else False.
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop cached state that may reference rolled-back data
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
875
875
    def rollback(self, dryrun=False, force=False):
        # Undo the last transaction if undo files exist; returns 0 on
        # success, 1 when there is nothing to roll back. Takes both locks
        # because the working directory may be rewritten too.
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
888
888
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        # Perform the actual rollback. Caller (rollback) holds both locks
        # and has checked that the undo files exist. Returns 0.
        ui = self.ui
        try:
            # undo.desc: "<old repo length>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # missing/unreadable undo.desc: roll back anyway, but without
            # the safety check below (desc stays None)
            msg = _('rolling back unknown transaction\n')
            desc = None

        # refuse to drop a commit out from under a working directory that
        # is not at tip, unless forced
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        # capture dirstate parents before the changelog shrinks
        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        # if a working-directory parent was stripped, restore the saved
        # dirstate and branch as well
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
958
958
    def invalidatecaches(self):
        # Drop derived in-memory caches (tags, branch heads, volatile
        # sets) so they are recomputed on next access.

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        # branch caches live on the unfiltered repo, shared by all views
        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
967
967
    def invalidatevolatilesets(self):
        # Drop caches that depend on obsolescence/visibility state:
        # per-filter revision sets, obsolete computations and hidden revs.
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
        if 'hiddenrevs' in vars(self):
            del self.hiddenrevs
973
973
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # clear the dirstate's own filecache entries first so a fresh
            # object re-reads everything from disk
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            # the dirstate filecache entry lives on the unfiltered repo
            delattr(self.unfiltered(), 'dirstate')
990
990
    def invalidate(self):
        # Drop every filecache-backed attribute (except the dirstate, which
        # has its own invalidation path) plus the derived caches, forcing a
        # re-read from disk on next access.
        unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                # attribute was never instantiated; nothing to drop
                pass
        self.invalidatecaches()
1003
1003
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        # Acquire the named lock file. First try non-blocking; when it is
        # held and wait is True, warn and retry with the configured timeout
        # (ui.timeout, default 600s). acquirefn runs after acquisition.
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
1018
1018
1019 def _afterlock(self, callback):
1019 def _afterlock(self, callback):
1020 """add a callback to the current repository lock.
1020 """add a callback to the current repository lock.
1021
1021
1022 The callback will be executed on lock release."""
1022 The callback will be executed on lock release."""
1023 l = self._lockref and self._lockref()
1023 l = self._lockref and self._lockref()
1024 if l:
1024 if l:
1025 l.postrelease.append(callback)
1025 l.postrelease.append(callback)
1026 else:
1026 else:
1027 callback()
1027 callback()
1028
1028
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # re-enter an already-held lock instead of acquiring a new one
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush pending store/phase data and refresh filecache stamps
            # when the lock is released
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        # weakref: an abandoned lock is released when garbage collected
        self._lockref = weakref.ref(l)
        return l
1051
1051
1052 def wlock(self, wait=True):
1052 def wlock(self, wait=True):
1053 '''Lock the non-store parts of the repository (everything under
1053 '''Lock the non-store parts of the repository (everything under
1054 .hg except .hg/store) and return a weak reference to the lock.
1054 .hg except .hg/store) and return a weak reference to the lock.
1055 Use this before modifying files in .hg.'''
1055 Use this before modifying files in .hg.'''
1056 l = self._wlockref and self._wlockref()
1056 l = self._wlockref and self._wlockref()
1057 if l is not None and l.held:
1057 if l is not None and l.held:
1058 l.lock()
1058 l.lock()
1059 return l
1059 return l
1060
1060
1061 def unlock():
1061 def unlock():
1062 self.dirstate.write()
1062 self.dirstate.write()
1063 ce = self._filecache.get('dirstate')
1063 ce = self._filecache.get('dirstate')
1064 if ce:
1064 if ce:
1065 ce.refresh()
1065 ce.refresh()
1066
1066
1067 l = self._lock(self.join("wlock"), wait, unlock,
1067 l = self._lock(self.join("wlock"), wait, unlock,
1068 self.invalidatedirstate, _('working directory of %s') %
1068 self.invalidatedirstate, _('working directory of %s') %
1069 self.origroot)
1069 self.origroot)
1070 self._wlockref = weakref.ref(l)
1070 self._wlockref = weakref.ref(l)
1071 return l
1071 return l
1072
1072
1073 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1073 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1074 """
1074 """
1075 commit an individual file as part of a larger transaction
1075 commit an individual file as part of a larger transaction
1076 """
1076 """
1077
1077
1078 fname = fctx.path()
1078 fname = fctx.path()
1079 text = fctx.data()
1079 text = fctx.data()
1080 flog = self.file(fname)
1080 flog = self.file(fname)
1081 fparent1 = manifest1.get(fname, nullid)
1081 fparent1 = manifest1.get(fname, nullid)
1082 fparent2 = fparent2o = manifest2.get(fname, nullid)
1082 fparent2 = fparent2o = manifest2.get(fname, nullid)
1083
1083
1084 meta = {}
1084 meta = {}
1085 copy = fctx.renamed()
1085 copy = fctx.renamed()
1086 if copy and copy[0] != fname:
1086 if copy and copy[0] != fname:
1087 # Mark the new revision of this file as a copy of another
1087 # Mark the new revision of this file as a copy of another
1088 # file. This copy data will effectively act as a parent
1088 # file. This copy data will effectively act as a parent
1089 # of this new revision. If this is a merge, the first
1089 # of this new revision. If this is a merge, the first
1090 # parent will be the nullid (meaning "look up the copy data")
1090 # parent will be the nullid (meaning "look up the copy data")
1091 # and the second one will be the other parent. For example:
1091 # and the second one will be the other parent. For example:
1092 #
1092 #
1093 # 0 --- 1 --- 3 rev1 changes file foo
1093 # 0 --- 1 --- 3 rev1 changes file foo
1094 # \ / rev2 renames foo to bar and changes it
1094 # \ / rev2 renames foo to bar and changes it
1095 # \- 2 -/ rev3 should have bar with all changes and
1095 # \- 2 -/ rev3 should have bar with all changes and
1096 # should record that bar descends from
1096 # should record that bar descends from
1097 # bar in rev2 and foo in rev1
1097 # bar in rev2 and foo in rev1
1098 #
1098 #
1099 # this allows this merge to succeed:
1099 # this allows this merge to succeed:
1100 #
1100 #
1101 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1101 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1102 # \ / merging rev3 and rev4 should use bar@rev2
1102 # \ / merging rev3 and rev4 should use bar@rev2
1103 # \- 2 --- 4 as the merge base
1103 # \- 2 --- 4 as the merge base
1104 #
1104 #
1105
1105
1106 cfname = copy[0]
1106 cfname = copy[0]
1107 crev = manifest1.get(cfname)
1107 crev = manifest1.get(cfname)
1108 newfparent = fparent2
1108 newfparent = fparent2
1109
1109
1110 if manifest2: # branch merge
1110 if manifest2: # branch merge
1111 if fparent2 == nullid or crev is None: # copied on remote side
1111 if fparent2 == nullid or crev is None: # copied on remote side
1112 if cfname in manifest2:
1112 if cfname in manifest2:
1113 crev = manifest2[cfname]
1113 crev = manifest2[cfname]
1114 newfparent = fparent1
1114 newfparent = fparent1
1115
1115
1116 # find source in nearest ancestor if we've lost track
1116 # find source in nearest ancestor if we've lost track
1117 if not crev:
1117 if not crev:
1118 self.ui.debug(" %s: searching for copy revision for %s\n" %
1118 self.ui.debug(" %s: searching for copy revision for %s\n" %
1119 (fname, cfname))
1119 (fname, cfname))
1120 for ancestor in self[None].ancestors():
1120 for ancestor in self[None].ancestors():
1121 if cfname in ancestor:
1121 if cfname in ancestor:
1122 crev = ancestor[cfname].filenode()
1122 crev = ancestor[cfname].filenode()
1123 break
1123 break
1124
1124
1125 if crev:
1125 if crev:
1126 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1126 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1127 meta["copy"] = cfname
1127 meta["copy"] = cfname
1128 meta["copyrev"] = hex(crev)
1128 meta["copyrev"] = hex(crev)
1129 fparent1, fparent2 = nullid, newfparent
1129 fparent1, fparent2 = nullid, newfparent
1130 else:
1130 else:
1131 self.ui.warn(_("warning: can't find ancestor for '%s' "
1131 self.ui.warn(_("warning: can't find ancestor for '%s' "
1132 "copied from '%s'!\n") % (fname, cfname))
1132 "copied from '%s'!\n") % (fname, cfname))
1133
1133
1134 elif fparent2 != nullid:
1134 elif fparent2 != nullid:
1135 # is one parent an ancestor of the other?
1135 # is one parent an ancestor of the other?
1136 fparentancestor = flog.ancestor(fparent1, fparent2)
1136 fparentancestor = flog.ancestor(fparent1, fparent2)
1137 if fparentancestor == fparent1:
1137 if fparentancestor == fparent1:
1138 fparent1, fparent2 = fparent2, nullid
1138 fparent1, fparent2 = fparent2, nullid
1139 elif fparentancestor == fparent2:
1139 elif fparentancestor == fparent2:
1140 fparent2 = nullid
1140 fparent2 = nullid
1141
1141
1142 # is the file changed?
1142 # is the file changed?
1143 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1143 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1144 changelist.append(fname)
1144 changelist.append(fname)
1145 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1145 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1146
1146
1147 # are just the flags changed during merge?
1147 # are just the flags changed during merge?
1148 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1148 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1149 changelist.append(fname)
1149 changelist.append(fname)
1150
1150
1151 return fparent1
1151 return fparent1
1152
1152
1153 @unfilteredmethod
1153 @unfilteredmethod
1154 def commit(self, text="", user=None, date=None, match=None, force=False,
1154 def commit(self, text="", user=None, date=None, match=None, force=False,
1155 editor=False, extra={}):
1155 editor=False, extra={}):
1156 """Add a new revision to current repository.
1156 """Add a new revision to current repository.
1157
1157
1158 Revision information is gathered from the working directory,
1158 Revision information is gathered from the working directory,
1159 match can be used to filter the committed files. If editor is
1159 match can be used to filter the committed files. If editor is
1160 supplied, it is called to get a commit message.
1160 supplied, it is called to get a commit message.
1161 """
1161 """
1162
1162
1163 def fail(f, msg):
1163 def fail(f, msg):
1164 raise util.Abort('%s: %s' % (f, msg))
1164 raise util.Abort('%s: %s' % (f, msg))
1165
1165
1166 if not match:
1166 if not match:
1167 match = matchmod.always(self.root, '')
1167 match = matchmod.always(self.root, '')
1168
1168
1169 if not force:
1169 if not force:
1170 vdirs = []
1170 vdirs = []
1171 match.dir = vdirs.append
1171 match.dir = vdirs.append
1172 match.bad = fail
1172 match.bad = fail
1173
1173
1174 wlock = self.wlock()
1174 wlock = self.wlock()
1175 try:
1175 try:
1176 wctx = self[None]
1176 wctx = self[None]
1177 merge = len(wctx.parents()) > 1
1177 merge = len(wctx.parents()) > 1
1178
1178
1179 if (not force and merge and match and
1179 if (not force and merge and match and
1180 (match.files() or match.anypats())):
1180 (match.files() or match.anypats())):
1181 raise util.Abort(_('cannot partially commit a merge '
1181 raise util.Abort(_('cannot partially commit a merge '
1182 '(do not specify files or patterns)'))
1182 '(do not specify files or patterns)'))
1183
1183
1184 changes = self.status(match=match, clean=force)
1184 changes = self.status(match=match, clean=force)
1185 if force:
1185 if force:
1186 changes[0].extend(changes[6]) # mq may commit unchanged files
1186 changes[0].extend(changes[6]) # mq may commit unchanged files
1187
1187
1188 # check subrepos
1188 # check subrepos
1189 subs = []
1189 subs = []
1190 commitsubs = set()
1190 commitsubs = set()
1191 newstate = wctx.substate.copy()
1191 newstate = wctx.substate.copy()
1192 # only manage subrepos and .hgsubstate if .hgsub is present
1192 # only manage subrepos and .hgsubstate if .hgsub is present
1193 if '.hgsub' in wctx:
1193 if '.hgsub' in wctx:
1194 # we'll decide whether to track this ourselves, thanks
1194 # we'll decide whether to track this ourselves, thanks
1195 if '.hgsubstate' in changes[0]:
1195 if '.hgsubstate' in changes[0]:
1196 changes[0].remove('.hgsubstate')
1196 changes[0].remove('.hgsubstate')
1197 if '.hgsubstate' in changes[2]:
1197 if '.hgsubstate' in changes[2]:
1198 changes[2].remove('.hgsubstate')
1198 changes[2].remove('.hgsubstate')
1199
1199
1200 # compare current state to last committed state
1200 # compare current state to last committed state
1201 # build new substate based on last committed state
1201 # build new substate based on last committed state
1202 oldstate = wctx.p1().substate
1202 oldstate = wctx.p1().substate
1203 for s in sorted(newstate.keys()):
1203 for s in sorted(newstate.keys()):
1204 if not match(s):
1204 if not match(s):
1205 # ignore working copy, use old state if present
1205 # ignore working copy, use old state if present
1206 if s in oldstate:
1206 if s in oldstate:
1207 newstate[s] = oldstate[s]
1207 newstate[s] = oldstate[s]
1208 continue
1208 continue
1209 if not force:
1209 if not force:
1210 raise util.Abort(
1210 raise util.Abort(
1211 _("commit with new subrepo %s excluded") % s)
1211 _("commit with new subrepo %s excluded") % s)
1212 if wctx.sub(s).dirty(True):
1212 if wctx.sub(s).dirty(True):
1213 if not self.ui.configbool('ui', 'commitsubrepos'):
1213 if not self.ui.configbool('ui', 'commitsubrepos'):
1214 raise util.Abort(
1214 raise util.Abort(
1215 _("uncommitted changes in subrepo %s") % s,
1215 _("uncommitted changes in subrepo %s") % s,
1216 hint=_("use --subrepos for recursive commit"))
1216 hint=_("use --subrepos for recursive commit"))
1217 subs.append(s)
1217 subs.append(s)
1218 commitsubs.add(s)
1218 commitsubs.add(s)
1219 else:
1219 else:
1220 bs = wctx.sub(s).basestate()
1220 bs = wctx.sub(s).basestate()
1221 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1221 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1222 if oldstate.get(s, (None, None, None))[1] != bs:
1222 if oldstate.get(s, (None, None, None))[1] != bs:
1223 subs.append(s)
1223 subs.append(s)
1224
1224
1225 # check for removed subrepos
1225 # check for removed subrepos
1226 for p in wctx.parents():
1226 for p in wctx.parents():
1227 r = [s for s in p.substate if s not in newstate]
1227 r = [s for s in p.substate if s not in newstate]
1228 subs += [s for s in r if match(s)]
1228 subs += [s for s in r if match(s)]
1229 if subs:
1229 if subs:
1230 if (not match('.hgsub') and
1230 if (not match('.hgsub') and
1231 '.hgsub' in (wctx.modified() + wctx.added())):
1231 '.hgsub' in (wctx.modified() + wctx.added())):
1232 raise util.Abort(
1232 raise util.Abort(
1233 _("can't commit subrepos without .hgsub"))
1233 _("can't commit subrepos without .hgsub"))
1234 changes[0].insert(0, '.hgsubstate')
1234 changes[0].insert(0, '.hgsubstate')
1235
1235
1236 elif '.hgsub' in changes[2]:
1236 elif '.hgsub' in changes[2]:
1237 # clean up .hgsubstate when .hgsub is removed
1237 # clean up .hgsubstate when .hgsub is removed
1238 if ('.hgsubstate' in wctx and
1238 if ('.hgsubstate' in wctx and
1239 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1239 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1240 changes[2].insert(0, '.hgsubstate')
1240 changes[2].insert(0, '.hgsubstate')
1241
1241
1242 # make sure all explicit patterns are matched
1242 # make sure all explicit patterns are matched
1243 if not force and match.files():
1243 if not force and match.files():
1244 matched = set(changes[0] + changes[1] + changes[2])
1244 matched = set(changes[0] + changes[1] + changes[2])
1245
1245
1246 for f in match.files():
1246 for f in match.files():
1247 f = self.dirstate.normalize(f)
1247 f = self.dirstate.normalize(f)
1248 if f == '.' or f in matched or f in wctx.substate:
1248 if f == '.' or f in matched or f in wctx.substate:
1249 continue
1249 continue
1250 if f in changes[3]: # missing
1250 if f in changes[3]: # missing
1251 fail(f, _('file not found!'))
1251 fail(f, _('file not found!'))
1252 if f in vdirs: # visited directory
1252 if f in vdirs: # visited directory
1253 d = f + '/'
1253 d = f + '/'
1254 for mf in matched:
1254 for mf in matched:
1255 if mf.startswith(d):
1255 if mf.startswith(d):
1256 break
1256 break
1257 else:
1257 else:
1258 fail(f, _("no match under directory!"))
1258 fail(f, _("no match under directory!"))
1259 elif f not in self.dirstate:
1259 elif f not in self.dirstate:
1260 fail(f, _("file not tracked!"))
1260 fail(f, _("file not tracked!"))
1261
1261
1262 if (not force and not extra.get("close") and not merge
1262 if (not force and not extra.get("close") and not merge
1263 and not (changes[0] or changes[1] or changes[2])
1263 and not (changes[0] or changes[1] or changes[2])
1264 and wctx.branch() == wctx.p1().branch()):
1264 and wctx.branch() == wctx.p1().branch()):
1265 return None
1265 return None
1266
1266
1267 if merge and changes[3]:
1267 if merge and changes[3]:
1268 raise util.Abort(_("cannot commit merge with missing files"))
1268 raise util.Abort(_("cannot commit merge with missing files"))
1269
1269
1270 ms = mergemod.mergestate(self)
1270 ms = mergemod.mergestate(self)
1271 for f in changes[0]:
1271 for f in changes[0]:
1272 if f in ms and ms[f] == 'u':
1272 if f in ms and ms[f] == 'u':
1273 raise util.Abort(_("unresolved merge conflicts "
1273 raise util.Abort(_("unresolved merge conflicts "
1274 "(see hg help resolve)"))
1274 "(see hg help resolve)"))
1275
1275
1276 cctx = context.workingctx(self, text, user, date, extra, changes)
1276 cctx = context.workingctx(self, text, user, date, extra, changes)
1277 if editor:
1277 if editor:
1278 cctx._text = editor(self, cctx, subs)
1278 cctx._text = editor(self, cctx, subs)
1279 edited = (text != cctx._text)
1279 edited = (text != cctx._text)
1280
1280
1281 # commit subs and write new state
1281 # commit subs and write new state
1282 if subs:
1282 if subs:
1283 for s in sorted(commitsubs):
1283 for s in sorted(commitsubs):
1284 sub = wctx.sub(s)
1284 sub = wctx.sub(s)
1285 self.ui.status(_('committing subrepository %s\n') %
1285 self.ui.status(_('committing subrepository %s\n') %
1286 subrepo.subrelpath(sub))
1286 subrepo.subrelpath(sub))
1287 sr = sub.commit(cctx._text, user, date)
1287 sr = sub.commit(cctx._text, user, date)
1288 newstate[s] = (newstate[s][0], sr)
1288 newstate[s] = (newstate[s][0], sr)
1289 subrepo.writestate(self, newstate)
1289 subrepo.writestate(self, newstate)
1290
1290
1291 # Save commit message in case this transaction gets rolled back
1291 # Save commit message in case this transaction gets rolled back
1292 # (e.g. by a pretxncommit hook). Leave the content alone on
1292 # (e.g. by a pretxncommit hook). Leave the content alone on
1293 # the assumption that the user will use the same editor again.
1293 # the assumption that the user will use the same editor again.
1294 msgfn = self.savecommitmessage(cctx._text)
1294 msgfn = self.savecommitmessage(cctx._text)
1295
1295
1296 p1, p2 = self.dirstate.parents()
1296 p1, p2 = self.dirstate.parents()
1297 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1297 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1298 try:
1298 try:
1299 self.hook("precommit", throw=True, parent1=hookp1,
1299 self.hook("precommit", throw=True, parent1=hookp1,
1300 parent2=hookp2)
1300 parent2=hookp2)
1301 ret = self.commitctx(cctx, True)
1301 ret = self.commitctx(cctx, True)
1302 except: # re-raises
1302 except: # re-raises
1303 if edited:
1303 if edited:
1304 self.ui.write(
1304 self.ui.write(
1305 _('note: commit message saved in %s\n') % msgfn)
1305 _('note: commit message saved in %s\n') % msgfn)
1306 raise
1306 raise
1307
1307
1308 # update bookmarks, dirstate and mergestate
1308 # update bookmarks, dirstate and mergestate
1309 bookmarks.update(self, [p1, p2], ret)
1309 bookmarks.update(self, [p1, p2], ret)
1310 for f in changes[0] + changes[1]:
1310 for f in changes[0] + changes[1]:
1311 self.dirstate.normal(f)
1311 self.dirstate.normal(f)
1312 for f in changes[2]:
1312 for f in changes[2]:
1313 self.dirstate.drop(f)
1313 self.dirstate.drop(f)
1314 self.dirstate.setparents(ret)
1314 self.dirstate.setparents(ret)
1315 ms.reset()
1315 ms.reset()
1316 finally:
1316 finally:
1317 wlock.release()
1317 wlock.release()
1318
1318
1319 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1319 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1320 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1320 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1321 self._afterlock(commithook)
1321 self._afterlock(commithook)
1322 return ret
1322 return ret
1323
1323
1324 @unfilteredmethod
1324 @unfilteredmethod
1325 def commitctx(self, ctx, error=False):
1325 def commitctx(self, ctx, error=False):
1326 """Add a new revision to current repository.
1326 """Add a new revision to current repository.
1327 Revision information is passed via the context argument.
1327 Revision information is passed via the context argument.
1328 """
1328 """
1329
1329
1330 tr = lock = None
1330 tr = lock = None
1331 removed = list(ctx.removed())
1331 removed = list(ctx.removed())
1332 p1, p2 = ctx.p1(), ctx.p2()
1332 p1, p2 = ctx.p1(), ctx.p2()
1333 user = ctx.user()
1333 user = ctx.user()
1334
1334
1335 lock = self.lock()
1335 lock = self.lock()
1336 try:
1336 try:
1337 tr = self.transaction("commit")
1337 tr = self.transaction("commit")
1338 trp = weakref.proxy(tr)
1338 trp = weakref.proxy(tr)
1339
1339
1340 if ctx.files():
1340 if ctx.files():
1341 m1 = p1.manifest().copy()
1341 m1 = p1.manifest().copy()
1342 m2 = p2.manifest()
1342 m2 = p2.manifest()
1343
1343
1344 # check in files
1344 # check in files
1345 new = {}
1345 new = {}
1346 changed = []
1346 changed = []
1347 linkrev = len(self)
1347 linkrev = len(self)
1348 for f in sorted(ctx.modified() + ctx.added()):
1348 for f in sorted(ctx.modified() + ctx.added()):
1349 self.ui.note(f + "\n")
1349 self.ui.note(f + "\n")
1350 try:
1350 try:
1351 fctx = ctx[f]
1351 fctx = ctx[f]
1352 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1352 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1353 changed)
1353 changed)
1354 m1.set(f, fctx.flags())
1354 m1.set(f, fctx.flags())
1355 except OSError, inst:
1355 except OSError, inst:
1356 self.ui.warn(_("trouble committing %s!\n") % f)
1356 self.ui.warn(_("trouble committing %s!\n") % f)
1357 raise
1357 raise
1358 except IOError, inst:
1358 except IOError, inst:
1359 errcode = getattr(inst, 'errno', errno.ENOENT)
1359 errcode = getattr(inst, 'errno', errno.ENOENT)
1360 if error or errcode and errcode != errno.ENOENT:
1360 if error or errcode and errcode != errno.ENOENT:
1361 self.ui.warn(_("trouble committing %s!\n") % f)
1361 self.ui.warn(_("trouble committing %s!\n") % f)
1362 raise
1362 raise
1363 else:
1363 else:
1364 removed.append(f)
1364 removed.append(f)
1365
1365
1366 # update manifest
1366 # update manifest
1367 m1.update(new)
1367 m1.update(new)
1368 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1368 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1369 drop = [f for f in removed if f in m1]
1369 drop = [f for f in removed if f in m1]
1370 for f in drop:
1370 for f in drop:
1371 del m1[f]
1371 del m1[f]
1372 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1372 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1373 p2.manifestnode(), (new, drop))
1373 p2.manifestnode(), (new, drop))
1374 files = changed + removed
1374 files = changed + removed
1375 else:
1375 else:
1376 mn = p1.manifestnode()
1376 mn = p1.manifestnode()
1377 files = []
1377 files = []
1378
1378
1379 # update changelog
1379 # update changelog
1380 self.changelog.delayupdate()
1380 self.changelog.delayupdate()
1381 n = self.changelog.add(mn, files, ctx.description(),
1381 n = self.changelog.add(mn, files, ctx.description(),
1382 trp, p1.node(), p2.node(),
1382 trp, p1.node(), p2.node(),
1383 user, ctx.date(), ctx.extra().copy())
1383 user, ctx.date(), ctx.extra().copy())
1384 p = lambda: self.changelog.writepending() and self.root or ""
1384 p = lambda: self.changelog.writepending() and self.root or ""
1385 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1385 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1386 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1386 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1387 parent2=xp2, pending=p)
1387 parent2=xp2, pending=p)
1388 self.changelog.finalize(trp)
1388 self.changelog.finalize(trp)
1389 # set the new commit is proper phase
1389 # set the new commit is proper phase
1390 targetphase = phases.newcommitphase(self.ui)
1390 targetphase = phases.newcommitphase(self.ui)
1391 if targetphase:
1391 if targetphase:
1392 # retract boundary do not alter parent changeset.
1392 # retract boundary do not alter parent changeset.
1393 # if a parent have higher the resulting phase will
1393 # if a parent have higher the resulting phase will
1394 # be compliant anyway
1394 # be compliant anyway
1395 #
1395 #
1396 # if minimal phase was 0 we don't need to retract anything
1396 # if minimal phase was 0 we don't need to retract anything
1397 phases.retractboundary(self, targetphase, [n])
1397 phases.retractboundary(self, targetphase, [n])
1398 tr.close()
1398 tr.close()
1399 branchmap.updatecache(self)
1399 branchmap.updatecache(self)
1400 return n
1400 return n
1401 finally:
1401 finally:
1402 if tr:
1402 if tr:
1403 tr.release()
1403 tr.release()
1404 lock.release()
1404 lock.release()
1405
1405
1406 @unfilteredmethod
1406 @unfilteredmethod
1407 def destroyed(self, newheadnodes=None):
1407 def destroyed(self, newheadnodes=None):
1408 '''Inform the repository that nodes have been destroyed.
1408 '''Inform the repository that nodes have been destroyed.
1409 Intended for use by strip and rollback, so there's a common
1409 Intended for use by strip and rollback, so there's a common
1410 place for anything that has to be done after destroying history.
1410 place for anything that has to be done after destroying history.
1411
1411
1412 If you know the branchheadcache was uptodate before nodes were removed
1412 If you know the branchheadcache was uptodate before nodes were removed
1413 and you also know the set of candidate new heads that may have resulted
1413 and you also know the set of candidate new heads that may have resulted
1414 from the destruction, you can set newheadnodes. This will enable the
1414 from the destruction, you can set newheadnodes. This will enable the
1415 code to update the branchheads cache, rather than having future code
1415 code to update the branchheads cache, rather than having future code
1416 decide it's invalid and regenerating it from scratch.
1416 decide it's invalid and regenerating it from scratch.
1417 '''
1417 '''
1418 # If we have info, newheadnodes, on how to update the branch cache, do
1418 # If we have info, newheadnodes, on how to update the branch cache, do
1419 # it, Otherwise, since nodes were destroyed, the cache is stale and this
1419 # it, Otherwise, since nodes were destroyed, the cache is stale and this
1420 # will be caught the next time it is read.
1420 # will be caught the next time it is read.
1421 if newheadnodes:
1421 if newheadnodes:
1422 ctxgen = (self[node] for node in newheadnodes
1422 ctxgen = (self[node] for node in newheadnodes
1423 if self.changelog.hasnode(node))
1423 if self.changelog.hasnode(node))
1424 cache = self._branchcaches[None]
1424 cache = self._branchcaches[None]
1425 cache.update(self, ctxgen)
1425 cache.update(self, ctxgen)
1426 cache.write(self)
1426 cache.write(self)
1427
1427
1428 # When one tries to:
1429 # 1) destroy nodes thus calling this method (e.g. strip)
1430 # 2) use phasecache somewhere (e.g. commit)
1431 #
1432 # then 2) will fail because the phasecache contains nodes that were
1433 # removed. We can either remove phasecache from the filecache,
1434 # causing it to reload next time it is accessed, or simply filter
1435 # the removed nodes now and write the updated cache.
1436 if '_phasecache' in self._filecache:
1437 self._phasecache.filterunknown(self)
1438 self._phasecache.write()
1439
1428 # Ensure the persistent tag cache is updated. Doing it now
1440 # Ensure the persistent tag cache is updated. Doing it now
1429 # means that the tag cache only has to worry about destroyed
1441 # means that the tag cache only has to worry about destroyed
1430 # heads immediately after a strip/rollback. That in turn
1442 # heads immediately after a strip/rollback. That in turn
1431 # guarantees that "cachetip == currenttip" (comparing both rev
1443 # guarantees that "cachetip == currenttip" (comparing both rev
1432 # and node) always means no nodes have been added or destroyed.
1444 # and node) always means no nodes have been added or destroyed.
1433
1445
1434 # XXX this is suboptimal when qrefresh'ing: we strip the current
1446 # XXX this is suboptimal when qrefresh'ing: we strip the current
1435 # head, refresh the tag cache, then immediately add a new head.
1447 # head, refresh the tag cache, then immediately add a new head.
1436 # But I think doing it this way is necessary for the "instant
1448 # But I think doing it this way is necessary for the "instant
1437 # tag cache retrieval" case to work.
1449 # tag cache retrieval" case to work.
1438 self.invalidatecaches()
1450 self.invalidatecaches()
1439
1451
1440 # Discard all cache entries to force reloading everything.
1452 # Discard all cache entries to force reloading everything.
1441 self._filecache.clear()
1453 self._filecache.clear()
1442
1454
1443 def walk(self, match, node=None):
1455 def walk(self, match, node=None):
1444 '''
1456 '''
1445 walk recursively through the directory tree or a given
1457 walk recursively through the directory tree or a given
1446 changeset, finding all files matched by the match
1458 changeset, finding all files matched by the match
1447 function
1459 function
1448 '''
1460 '''
1449 return self[node].walk(match)
1461 return self[node].walk(match)
1450
1462
1451 def status(self, node1='.', node2=None, match=None,
1463 def status(self, node1='.', node2=None, match=None,
1452 ignored=False, clean=False, unknown=False,
1464 ignored=False, clean=False, unknown=False,
1453 listsubrepos=False):
1465 listsubrepos=False):
1454 """return status of files between two nodes or node and working
1466 """return status of files between two nodes or node and working
1455 directory.
1467 directory.
1456
1468
1457 If node1 is None, use the first dirstate parent instead.
1469 If node1 is None, use the first dirstate parent instead.
1458 If node2 is None, compare node1 with working directory.
1470 If node2 is None, compare node1 with working directory.
1459 """
1471 """
1460
1472
1461 def mfmatches(ctx):
1473 def mfmatches(ctx):
1462 mf = ctx.manifest().copy()
1474 mf = ctx.manifest().copy()
1463 if match.always():
1475 if match.always():
1464 return mf
1476 return mf
1465 for fn in mf.keys():
1477 for fn in mf.keys():
1466 if not match(fn):
1478 if not match(fn):
1467 del mf[fn]
1479 del mf[fn]
1468 return mf
1480 return mf
1469
1481
1470 if isinstance(node1, context.changectx):
1482 if isinstance(node1, context.changectx):
1471 ctx1 = node1
1483 ctx1 = node1
1472 else:
1484 else:
1473 ctx1 = self[node1]
1485 ctx1 = self[node1]
1474 if isinstance(node2, context.changectx):
1486 if isinstance(node2, context.changectx):
1475 ctx2 = node2
1487 ctx2 = node2
1476 else:
1488 else:
1477 ctx2 = self[node2]
1489 ctx2 = self[node2]
1478
1490
1479 working = ctx2.rev() is None
1491 working = ctx2.rev() is None
1480 parentworking = working and ctx1 == self['.']
1492 parentworking = working and ctx1 == self['.']
1481 match = match or matchmod.always(self.root, self.getcwd())
1493 match = match or matchmod.always(self.root, self.getcwd())
1482 listignored, listclean, listunknown = ignored, clean, unknown
1494 listignored, listclean, listunknown = ignored, clean, unknown
1483
1495
1484 # load earliest manifest first for caching reasons
1496 # load earliest manifest first for caching reasons
1485 if not working and ctx2.rev() < ctx1.rev():
1497 if not working and ctx2.rev() < ctx1.rev():
1486 ctx2.manifest()
1498 ctx2.manifest()
1487
1499
1488 if not parentworking:
1500 if not parentworking:
1489 def bad(f, msg):
1501 def bad(f, msg):
1490 # 'f' may be a directory pattern from 'match.files()',
1502 # 'f' may be a directory pattern from 'match.files()',
1491 # so 'f not in ctx1' is not enough
1503 # so 'f not in ctx1' is not enough
1492 if f not in ctx1 and f not in ctx1.dirs():
1504 if f not in ctx1 and f not in ctx1.dirs():
1493 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1505 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1494 match.bad = bad
1506 match.bad = bad
1495
1507
1496 if working: # we need to scan the working dir
1508 if working: # we need to scan the working dir
1497 subrepos = []
1509 subrepos = []
1498 if '.hgsub' in self.dirstate:
1510 if '.hgsub' in self.dirstate:
1499 subrepos = ctx2.substate.keys()
1511 subrepos = ctx2.substate.keys()
1500 s = self.dirstate.status(match, subrepos, listignored,
1512 s = self.dirstate.status(match, subrepos, listignored,
1501 listclean, listunknown)
1513 listclean, listunknown)
1502 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1514 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1503
1515
1504 # check for any possibly clean files
1516 # check for any possibly clean files
1505 if parentworking and cmp:
1517 if parentworking and cmp:
1506 fixup = []
1518 fixup = []
1507 # do a full compare of any files that might have changed
1519 # do a full compare of any files that might have changed
1508 for f in sorted(cmp):
1520 for f in sorted(cmp):
1509 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1521 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1510 or ctx1[f].cmp(ctx2[f])):
1522 or ctx1[f].cmp(ctx2[f])):
1511 modified.append(f)
1523 modified.append(f)
1512 else:
1524 else:
1513 fixup.append(f)
1525 fixup.append(f)
1514
1526
1515 # update dirstate for files that are actually clean
1527 # update dirstate for files that are actually clean
1516 if fixup:
1528 if fixup:
1517 if listclean:
1529 if listclean:
1518 clean += fixup
1530 clean += fixup
1519
1531
1520 try:
1532 try:
1521 # updating the dirstate is optional
1533 # updating the dirstate is optional
1522 # so we don't wait on the lock
1534 # so we don't wait on the lock
1523 wlock = self.wlock(False)
1535 wlock = self.wlock(False)
1524 try:
1536 try:
1525 for f in fixup:
1537 for f in fixup:
1526 self.dirstate.normal(f)
1538 self.dirstate.normal(f)
1527 finally:
1539 finally:
1528 wlock.release()
1540 wlock.release()
1529 except error.LockError:
1541 except error.LockError:
1530 pass
1542 pass
1531
1543
1532 if not parentworking:
1544 if not parentworking:
1533 mf1 = mfmatches(ctx1)
1545 mf1 = mfmatches(ctx1)
1534 if working:
1546 if working:
1535 # we are comparing working dir against non-parent
1547 # we are comparing working dir against non-parent
1536 # generate a pseudo-manifest for the working dir
1548 # generate a pseudo-manifest for the working dir
1537 mf2 = mfmatches(self['.'])
1549 mf2 = mfmatches(self['.'])
1538 for f in cmp + modified + added:
1550 for f in cmp + modified + added:
1539 mf2[f] = None
1551 mf2[f] = None
1540 mf2.set(f, ctx2.flags(f))
1552 mf2.set(f, ctx2.flags(f))
1541 for f in removed:
1553 for f in removed:
1542 if f in mf2:
1554 if f in mf2:
1543 del mf2[f]
1555 del mf2[f]
1544 else:
1556 else:
1545 # we are comparing two revisions
1557 # we are comparing two revisions
1546 deleted, unknown, ignored = [], [], []
1558 deleted, unknown, ignored = [], [], []
1547 mf2 = mfmatches(ctx2)
1559 mf2 = mfmatches(ctx2)
1548
1560
1549 modified, added, clean = [], [], []
1561 modified, added, clean = [], [], []
1550 withflags = mf1.withflags() | mf2.withflags()
1562 withflags = mf1.withflags() | mf2.withflags()
1551 for fn in mf2:
1563 for fn in mf2:
1552 if fn in mf1:
1564 if fn in mf1:
1553 if (fn not in deleted and
1565 if (fn not in deleted and
1554 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1566 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1555 (mf1[fn] != mf2[fn] and
1567 (mf1[fn] != mf2[fn] and
1556 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1568 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1557 modified.append(fn)
1569 modified.append(fn)
1558 elif listclean:
1570 elif listclean:
1559 clean.append(fn)
1571 clean.append(fn)
1560 del mf1[fn]
1572 del mf1[fn]
1561 elif fn not in deleted:
1573 elif fn not in deleted:
1562 added.append(fn)
1574 added.append(fn)
1563 removed = mf1.keys()
1575 removed = mf1.keys()
1564
1576
1565 if working and modified and not self.dirstate._checklink:
1577 if working and modified and not self.dirstate._checklink:
1566 # Symlink placeholders may get non-symlink-like contents
1578 # Symlink placeholders may get non-symlink-like contents
1567 # via user error or dereferencing by NFS or Samba servers,
1579 # via user error or dereferencing by NFS or Samba servers,
1568 # so we filter out any placeholders that don't look like a
1580 # so we filter out any placeholders that don't look like a
1569 # symlink
1581 # symlink
1570 sane = []
1582 sane = []
1571 for f in modified:
1583 for f in modified:
1572 if ctx2.flags(f) == 'l':
1584 if ctx2.flags(f) == 'l':
1573 d = ctx2[f].data()
1585 d = ctx2[f].data()
1574 if len(d) >= 1024 or '\n' in d or util.binary(d):
1586 if len(d) >= 1024 or '\n' in d or util.binary(d):
1575 self.ui.debug('ignoring suspect symlink placeholder'
1587 self.ui.debug('ignoring suspect symlink placeholder'
1576 ' "%s"\n' % f)
1588 ' "%s"\n' % f)
1577 continue
1589 continue
1578 sane.append(f)
1590 sane.append(f)
1579 modified = sane
1591 modified = sane
1580
1592
1581 r = modified, added, removed, deleted, unknown, ignored, clean
1593 r = modified, added, removed, deleted, unknown, ignored, clean
1582
1594
1583 if listsubrepos:
1595 if listsubrepos:
1584 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1596 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1585 if working:
1597 if working:
1586 rev2 = None
1598 rev2 = None
1587 else:
1599 else:
1588 rev2 = ctx2.substate[subpath][1]
1600 rev2 = ctx2.substate[subpath][1]
1589 try:
1601 try:
1590 submatch = matchmod.narrowmatcher(subpath, match)
1602 submatch = matchmod.narrowmatcher(subpath, match)
1591 s = sub.status(rev2, match=submatch, ignored=listignored,
1603 s = sub.status(rev2, match=submatch, ignored=listignored,
1592 clean=listclean, unknown=listunknown,
1604 clean=listclean, unknown=listunknown,
1593 listsubrepos=True)
1605 listsubrepos=True)
1594 for rfiles, sfiles in zip(r, s):
1606 for rfiles, sfiles in zip(r, s):
1595 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1607 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1596 except error.LookupError:
1608 except error.LookupError:
1597 self.ui.status(_("skipping missing subrepository: %s\n")
1609 self.ui.status(_("skipping missing subrepository: %s\n")
1598 % subpath)
1610 % subpath)
1599
1611
1600 for l in r:
1612 for l in r:
1601 l.sort()
1613 l.sort()
1602 return r
1614 return r
1603
1615
1604 def heads(self, start=None):
1616 def heads(self, start=None):
1605 heads = self.changelog.heads(start)
1617 heads = self.changelog.heads(start)
1606 # sort the output in rev descending order
1618 # sort the output in rev descending order
1607 return sorted(heads, key=self.changelog.rev, reverse=True)
1619 return sorted(heads, key=self.changelog.rev, reverse=True)
1608
1620
1609 def branchheads(self, branch=None, start=None, closed=False):
1621 def branchheads(self, branch=None, start=None, closed=False):
1610 '''return a (possibly filtered) list of heads for the given branch
1622 '''return a (possibly filtered) list of heads for the given branch
1611
1623
1612 Heads are returned in topological order, from newest to oldest.
1624 Heads are returned in topological order, from newest to oldest.
1613 If branch is None, use the dirstate branch.
1625 If branch is None, use the dirstate branch.
1614 If start is not None, return only heads reachable from start.
1626 If start is not None, return only heads reachable from start.
1615 If closed is True, return heads that are marked as closed as well.
1627 If closed is True, return heads that are marked as closed as well.
1616 '''
1628 '''
1617 if branch is None:
1629 if branch is None:
1618 branch = self[None].branch()
1630 branch = self[None].branch()
1619 branches = self.branchmap()
1631 branches = self.branchmap()
1620 if branch not in branches:
1632 if branch not in branches:
1621 return []
1633 return []
1622 # the cache returns heads ordered lowest to highest
1634 # the cache returns heads ordered lowest to highest
1623 bheads = list(reversed(branches[branch]))
1635 bheads = list(reversed(branches[branch]))
1624 if start is not None:
1636 if start is not None:
1625 # filter out the heads that cannot be reached from startrev
1637 # filter out the heads that cannot be reached from startrev
1626 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1638 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1627 bheads = [h for h in bheads if h in fbheads]
1639 bheads = [h for h in bheads if h in fbheads]
1628 if not closed:
1640 if not closed:
1629 bheads = [h for h in bheads if not self[h].closesbranch()]
1641 bheads = [h for h in bheads if not self[h].closesbranch()]
1630 return bheads
1642 return bheads
1631
1643
1632 def branches(self, nodes):
1644 def branches(self, nodes):
1633 if not nodes:
1645 if not nodes:
1634 nodes = [self.changelog.tip()]
1646 nodes = [self.changelog.tip()]
1635 b = []
1647 b = []
1636 for n in nodes:
1648 for n in nodes:
1637 t = n
1649 t = n
1638 while True:
1650 while True:
1639 p = self.changelog.parents(n)
1651 p = self.changelog.parents(n)
1640 if p[1] != nullid or p[0] == nullid:
1652 if p[1] != nullid or p[0] == nullid:
1641 b.append((t, n, p[0], p[1]))
1653 b.append((t, n, p[0], p[1]))
1642 break
1654 break
1643 n = p[0]
1655 n = p[0]
1644 return b
1656 return b
1645
1657
1646 def between(self, pairs):
1658 def between(self, pairs):
1647 r = []
1659 r = []
1648
1660
1649 for top, bottom in pairs:
1661 for top, bottom in pairs:
1650 n, l, i = top, [], 0
1662 n, l, i = top, [], 0
1651 f = 1
1663 f = 1
1652
1664
1653 while n != bottom and n != nullid:
1665 while n != bottom and n != nullid:
1654 p = self.changelog.parents(n)[0]
1666 p = self.changelog.parents(n)[0]
1655 if i == f:
1667 if i == f:
1656 l.append(n)
1668 l.append(n)
1657 f = f * 2
1669 f = f * 2
1658 n = p
1670 n = p
1659 i += 1
1671 i += 1
1660
1672
1661 r.append(l)
1673 r.append(l)
1662
1674
1663 return r
1675 return r
1664
1676
1665 def pull(self, remote, heads=None, force=False):
1677 def pull(self, remote, heads=None, force=False):
1666 # don't open transaction for nothing or you break future useful
1678 # don't open transaction for nothing or you break future useful
1667 # rollback call
1679 # rollback call
1668 tr = None
1680 tr = None
1669 trname = 'pull\n' + util.hidepassword(remote.url())
1681 trname = 'pull\n' + util.hidepassword(remote.url())
1670 lock = self.lock()
1682 lock = self.lock()
1671 try:
1683 try:
1672 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1684 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1673 force=force)
1685 force=force)
1674 common, fetch, rheads = tmp
1686 common, fetch, rheads = tmp
1675 if not fetch:
1687 if not fetch:
1676 self.ui.status(_("no changes found\n"))
1688 self.ui.status(_("no changes found\n"))
1677 added = []
1689 added = []
1678 result = 0
1690 result = 0
1679 else:
1691 else:
1680 tr = self.transaction(trname)
1692 tr = self.transaction(trname)
1681 if heads is None and list(common) == [nullid]:
1693 if heads is None and list(common) == [nullid]:
1682 self.ui.status(_("requesting all changes\n"))
1694 self.ui.status(_("requesting all changes\n"))
1683 elif heads is None and remote.capable('changegroupsubset'):
1695 elif heads is None and remote.capable('changegroupsubset'):
1684 # issue1320, avoid a race if remote changed after discovery
1696 # issue1320, avoid a race if remote changed after discovery
1685 heads = rheads
1697 heads = rheads
1686
1698
1687 if remote.capable('getbundle'):
1699 if remote.capable('getbundle'):
1688 cg = remote.getbundle('pull', common=common,
1700 cg = remote.getbundle('pull', common=common,
1689 heads=heads or rheads)
1701 heads=heads or rheads)
1690 elif heads is None:
1702 elif heads is None:
1691 cg = remote.changegroup(fetch, 'pull')
1703 cg = remote.changegroup(fetch, 'pull')
1692 elif not remote.capable('changegroupsubset'):
1704 elif not remote.capable('changegroupsubset'):
1693 raise util.Abort(_("partial pull cannot be done because "
1705 raise util.Abort(_("partial pull cannot be done because "
1694 "other repository doesn't support "
1706 "other repository doesn't support "
1695 "changegroupsubset."))
1707 "changegroupsubset."))
1696 else:
1708 else:
1697 cg = remote.changegroupsubset(fetch, heads, 'pull')
1709 cg = remote.changegroupsubset(fetch, heads, 'pull')
1698 clstart = len(self.changelog)
1710 clstart = len(self.changelog)
1699 result = self.addchangegroup(cg, 'pull', remote.url())
1711 result = self.addchangegroup(cg, 'pull', remote.url())
1700 clend = len(self.changelog)
1712 clend = len(self.changelog)
1701 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1713 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1702
1714
1703 # compute target subset
1715 # compute target subset
1704 if heads is None:
1716 if heads is None:
1705 # We pulled every thing possible
1717 # We pulled every thing possible
1706 # sync on everything common
1718 # sync on everything common
1707 subset = common + added
1719 subset = common + added
1708 else:
1720 else:
1709 # We pulled a specific subset
1721 # We pulled a specific subset
1710 # sync on this subset
1722 # sync on this subset
1711 subset = heads
1723 subset = heads
1712
1724
1713 # Get remote phases data from remote
1725 # Get remote phases data from remote
1714 remotephases = remote.listkeys('phases')
1726 remotephases = remote.listkeys('phases')
1715 publishing = bool(remotephases.get('publishing', False))
1727 publishing = bool(remotephases.get('publishing', False))
1716 if remotephases and not publishing:
1728 if remotephases and not publishing:
1717 # remote is new and unpublishing
1729 # remote is new and unpublishing
1718 pheads, _dr = phases.analyzeremotephases(self, subset,
1730 pheads, _dr = phases.analyzeremotephases(self, subset,
1719 remotephases)
1731 remotephases)
1720 phases.advanceboundary(self, phases.public, pheads)
1732 phases.advanceboundary(self, phases.public, pheads)
1721 phases.advanceboundary(self, phases.draft, subset)
1733 phases.advanceboundary(self, phases.draft, subset)
1722 else:
1734 else:
1723 # Remote is old or publishing all common changesets
1735 # Remote is old or publishing all common changesets
1724 # should be seen as public
1736 # should be seen as public
1725 phases.advanceboundary(self, phases.public, subset)
1737 phases.advanceboundary(self, phases.public, subset)
1726
1738
1727 if obsolete._enabled:
1739 if obsolete._enabled:
1728 self.ui.debug('fetching remote obsolete markers\n')
1740 self.ui.debug('fetching remote obsolete markers\n')
1729 remoteobs = remote.listkeys('obsolete')
1741 remoteobs = remote.listkeys('obsolete')
1730 if 'dump0' in remoteobs:
1742 if 'dump0' in remoteobs:
1731 if tr is None:
1743 if tr is None:
1732 tr = self.transaction(trname)
1744 tr = self.transaction(trname)
1733 for key in sorted(remoteobs, reverse=True):
1745 for key in sorted(remoteobs, reverse=True):
1734 if key.startswith('dump'):
1746 if key.startswith('dump'):
1735 data = base85.b85decode(remoteobs[key])
1747 data = base85.b85decode(remoteobs[key])
1736 self.obsstore.mergemarkers(tr, data)
1748 self.obsstore.mergemarkers(tr, data)
1737 self.invalidatevolatilesets()
1749 self.invalidatevolatilesets()
1738 if tr is not None:
1750 if tr is not None:
1739 tr.close()
1751 tr.close()
1740 finally:
1752 finally:
1741 if tr is not None:
1753 if tr is not None:
1742 tr.release()
1754 tr.release()
1743 lock.release()
1755 lock.release()
1744
1756
1745 return result
1757 return result
1746
1758
1747 def checkpush(self, force, revs):
1759 def checkpush(self, force, revs):
1748 """Extensions can override this function if additional checks have
1760 """Extensions can override this function if additional checks have
1749 to be performed before pushing, or call it if they override push
1761 to be performed before pushing, or call it if they override push
1750 command.
1762 command.
1751 """
1763 """
1752 pass
1764 pass
1753
1765
1754 def push(self, remote, force=False, revs=None, newbranch=False):
1766 def push(self, remote, force=False, revs=None, newbranch=False):
1755 '''Push outgoing changesets (limited by revs) from the current
1767 '''Push outgoing changesets (limited by revs) from the current
1756 repository to remote. Return an integer:
1768 repository to remote. Return an integer:
1757 - None means nothing to push
1769 - None means nothing to push
1758 - 0 means HTTP error
1770 - 0 means HTTP error
1759 - 1 means we pushed and remote head count is unchanged *or*
1771 - 1 means we pushed and remote head count is unchanged *or*
1760 we have outgoing changesets but refused to push
1772 we have outgoing changesets but refused to push
1761 - other values as described by addchangegroup()
1773 - other values as described by addchangegroup()
1762 '''
1774 '''
1763 # there are two ways to push to remote repo:
1775 # there are two ways to push to remote repo:
1764 #
1776 #
1765 # addchangegroup assumes local user can lock remote
1777 # addchangegroup assumes local user can lock remote
1766 # repo (local filesystem, old ssh servers).
1778 # repo (local filesystem, old ssh servers).
1767 #
1779 #
1768 # unbundle assumes local user cannot lock remote repo (new ssh
1780 # unbundle assumes local user cannot lock remote repo (new ssh
1769 # servers, http servers).
1781 # servers, http servers).
1770
1782
1771 if not remote.canpush():
1783 if not remote.canpush():
1772 raise util.Abort(_("destination does not support push"))
1784 raise util.Abort(_("destination does not support push"))
1773 unfi = self.unfiltered()
1785 unfi = self.unfiltered()
1774 # get local lock as we might write phase data
1786 # get local lock as we might write phase data
1775 locallock = self.lock()
1787 locallock = self.lock()
1776 try:
1788 try:
1777 self.checkpush(force, revs)
1789 self.checkpush(force, revs)
1778 lock = None
1790 lock = None
1779 unbundle = remote.capable('unbundle')
1791 unbundle = remote.capable('unbundle')
1780 if not unbundle:
1792 if not unbundle:
1781 lock = remote.lock()
1793 lock = remote.lock()
1782 try:
1794 try:
1783 # discovery
1795 # discovery
1784 fci = discovery.findcommonincoming
1796 fci = discovery.findcommonincoming
1785 commoninc = fci(unfi, remote, force=force)
1797 commoninc = fci(unfi, remote, force=force)
1786 common, inc, remoteheads = commoninc
1798 common, inc, remoteheads = commoninc
1787 fco = discovery.findcommonoutgoing
1799 fco = discovery.findcommonoutgoing
1788 outgoing = fco(unfi, remote, onlyheads=revs,
1800 outgoing = fco(unfi, remote, onlyheads=revs,
1789 commoninc=commoninc, force=force)
1801 commoninc=commoninc, force=force)
1790
1802
1791
1803
1792 if not outgoing.missing:
1804 if not outgoing.missing:
1793 # nothing to push
1805 # nothing to push
1794 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1806 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1795 ret = None
1807 ret = None
1796 else:
1808 else:
1797 # something to push
1809 # something to push
1798 if not force:
1810 if not force:
1799 # if self.obsstore == False --> no obsolete
1811 # if self.obsstore == False --> no obsolete
1800 # then, save the iteration
1812 # then, save the iteration
1801 if unfi.obsstore:
1813 if unfi.obsstore:
1802 # this message are here for 80 char limit reason
1814 # this message are here for 80 char limit reason
1803 mso = _("push includes obsolete changeset: %s!")
1815 mso = _("push includes obsolete changeset: %s!")
1804 mst = "push includes %s changeset: %s!"
1816 mst = "push includes %s changeset: %s!"
1805 # plain versions for i18n tool to detect them
1817 # plain versions for i18n tool to detect them
1806 _("push includes unstable changeset: %s!")
1818 _("push includes unstable changeset: %s!")
1807 _("push includes bumped changeset: %s!")
1819 _("push includes bumped changeset: %s!")
1808 _("push includes divergent changeset: %s!")
1820 _("push includes divergent changeset: %s!")
1809 # If we are to push if there is at least one
1821 # If we are to push if there is at least one
1810 # obsolete or unstable changeset in missing, at
1822 # obsolete or unstable changeset in missing, at
1811 # least one of the missinghead will be obsolete or
1823 # least one of the missinghead will be obsolete or
1812 # unstable. So checking heads only is ok
1824 # unstable. So checking heads only is ok
1813 for node in outgoing.missingheads:
1825 for node in outgoing.missingheads:
1814 ctx = unfi[node]
1826 ctx = unfi[node]
1815 if ctx.obsolete():
1827 if ctx.obsolete():
1816 raise util.Abort(mso % ctx)
1828 raise util.Abort(mso % ctx)
1817 elif ctx.troubled():
1829 elif ctx.troubled():
1818 raise util.Abort(_(mst)
1830 raise util.Abort(_(mst)
1819 % (ctx.troubles()[0],
1831 % (ctx.troubles()[0],
1820 ctx))
1832 ctx))
1821 discovery.checkheads(unfi, remote, outgoing,
1833 discovery.checkheads(unfi, remote, outgoing,
1822 remoteheads, newbranch,
1834 remoteheads, newbranch,
1823 bool(inc))
1835 bool(inc))
1824
1836
1825 # create a changegroup from local
1837 # create a changegroup from local
1826 if revs is None and not outgoing.excluded:
1838 if revs is None and not outgoing.excluded:
1827 # push everything,
1839 # push everything,
1828 # use the fast path, no race possible on push
1840 # use the fast path, no race possible on push
1829 cg = self._changegroup(outgoing.missing, 'push')
1841 cg = self._changegroup(outgoing.missing, 'push')
1830 else:
1842 else:
1831 cg = self.getlocalbundle('push', outgoing)
1843 cg = self.getlocalbundle('push', outgoing)
1832
1844
1833 # apply changegroup to remote
1845 # apply changegroup to remote
1834 if unbundle:
1846 if unbundle:
1835 # local repo finds heads on server, finds out what
1847 # local repo finds heads on server, finds out what
1836 # revs it must push. once revs transferred, if server
1848 # revs it must push. once revs transferred, if server
1837 # finds it has different heads (someone else won
1849 # finds it has different heads (someone else won
1838 # commit/push race), server aborts.
1850 # commit/push race), server aborts.
1839 if force:
1851 if force:
1840 remoteheads = ['force']
1852 remoteheads = ['force']
1841 # ssh: return remote's addchangegroup()
1853 # ssh: return remote's addchangegroup()
1842 # http: return remote's addchangegroup() or 0 for error
1854 # http: return remote's addchangegroup() or 0 for error
1843 ret = remote.unbundle(cg, remoteheads, 'push')
1855 ret = remote.unbundle(cg, remoteheads, 'push')
1844 else:
1856 else:
1845 # we return an integer indicating remote head count
1857 # we return an integer indicating remote head count
1846 # change
1858 # change
1847 ret = remote.addchangegroup(cg, 'push', self.url())
1859 ret = remote.addchangegroup(cg, 'push', self.url())
1848
1860
1849 if ret:
1861 if ret:
1850 # push succeed, synchronize target of the push
1862 # push succeed, synchronize target of the push
1851 cheads = outgoing.missingheads
1863 cheads = outgoing.missingheads
1852 elif revs is None:
1864 elif revs is None:
1853 # All out push fails. synchronize all common
1865 # All out push fails. synchronize all common
1854 cheads = outgoing.commonheads
1866 cheads = outgoing.commonheads
1855 else:
1867 else:
1856 # I want cheads = heads(::missingheads and ::commonheads)
1868 # I want cheads = heads(::missingheads and ::commonheads)
1857 # (missingheads is revs with secret changeset filtered out)
1869 # (missingheads is revs with secret changeset filtered out)
1858 #
1870 #
1859 # This can be expressed as:
1871 # This can be expressed as:
1860 # cheads = ( (missingheads and ::commonheads)
1872 # cheads = ( (missingheads and ::commonheads)
1861 # + (commonheads and ::missingheads))"
1873 # + (commonheads and ::missingheads))"
1862 # )
1874 # )
1863 #
1875 #
1864 # while trying to push we already computed the following:
1876 # while trying to push we already computed the following:
1865 # common = (::commonheads)
1877 # common = (::commonheads)
1866 # missing = ((commonheads::missingheads) - commonheads)
1878 # missing = ((commonheads::missingheads) - commonheads)
1867 #
1879 #
1868 # We can pick:
1880 # We can pick:
1869 # * missingheads part of common (::commonheads)
1881 # * missingheads part of common (::commonheads)
1870 common = set(outgoing.common)
1882 common = set(outgoing.common)
1871 cheads = [node for node in revs if node in common]
1883 cheads = [node for node in revs if node in common]
1872 # and
1884 # and
1873 # * commonheads parents on missing
1885 # * commonheads parents on missing
1874 revset = unfi.set('%ln and parents(roots(%ln))',
1886 revset = unfi.set('%ln and parents(roots(%ln))',
1875 outgoing.commonheads,
1887 outgoing.commonheads,
1876 outgoing.missing)
1888 outgoing.missing)
1877 cheads.extend(c.node() for c in revset)
1889 cheads.extend(c.node() for c in revset)
1878 # even when we don't push, exchanging phase data is useful
1890 # even when we don't push, exchanging phase data is useful
1879 remotephases = remote.listkeys('phases')
1891 remotephases = remote.listkeys('phases')
1880 if not remotephases: # old server or public only repo
1892 if not remotephases: # old server or public only repo
1881 phases.advanceboundary(self, phases.public, cheads)
1893 phases.advanceboundary(self, phases.public, cheads)
1882 # don't push any phase data as there is nothing to push
1894 # don't push any phase data as there is nothing to push
1883 else:
1895 else:
1884 ana = phases.analyzeremotephases(self, cheads, remotephases)
1896 ana = phases.analyzeremotephases(self, cheads, remotephases)
1885 pheads, droots = ana
1897 pheads, droots = ana
1886 ### Apply remote phase on local
1898 ### Apply remote phase on local
1887 if remotephases.get('publishing', False):
1899 if remotephases.get('publishing', False):
1888 phases.advanceboundary(self, phases.public, cheads)
1900 phases.advanceboundary(self, phases.public, cheads)
1889 else: # publish = False
1901 else: # publish = False
1890 phases.advanceboundary(self, phases.public, pheads)
1902 phases.advanceboundary(self, phases.public, pheads)
1891 phases.advanceboundary(self, phases.draft, cheads)
1903 phases.advanceboundary(self, phases.draft, cheads)
1892 ### Apply local phase on remote
1904 ### Apply local phase on remote
1893
1905
1894 # Get the list of all revs draft on remote by public here.
1906 # Get the list of all revs draft on remote by public here.
1895 # XXX Beware that revset break if droots is not strictly
1907 # XXX Beware that revset break if droots is not strictly
1896 # XXX root we may want to ensure it is but it is costly
1908 # XXX root we may want to ensure it is but it is costly
1897 outdated = unfi.set('heads((%ln::%ln) and public())',
1909 outdated = unfi.set('heads((%ln::%ln) and public())',
1898 droots, cheads)
1910 droots, cheads)
1899 for newremotehead in outdated:
1911 for newremotehead in outdated:
1900 r = remote.pushkey('phases',
1912 r = remote.pushkey('phases',
1901 newremotehead.hex(),
1913 newremotehead.hex(),
1902 str(phases.draft),
1914 str(phases.draft),
1903 str(phases.public))
1915 str(phases.public))
1904 if not r:
1916 if not r:
1905 self.ui.warn(_('updating %s to public failed!\n')
1917 self.ui.warn(_('updating %s to public failed!\n')
1906 % newremotehead)
1918 % newremotehead)
1907 self.ui.debug('try to push obsolete markers to remote\n')
1919 self.ui.debug('try to push obsolete markers to remote\n')
1908 if (obsolete._enabled and self.obsstore and
1920 if (obsolete._enabled and self.obsstore and
1909 'obsolete' in remote.listkeys('namespaces')):
1921 'obsolete' in remote.listkeys('namespaces')):
1910 rslts = []
1922 rslts = []
1911 remotedata = self.listkeys('obsolete')
1923 remotedata = self.listkeys('obsolete')
1912 for key in sorted(remotedata, reverse=True):
1924 for key in sorted(remotedata, reverse=True):
1913 # reverse sort to ensure we end with dump0
1925 # reverse sort to ensure we end with dump0
1914 data = remotedata[key]
1926 data = remotedata[key]
1915 rslts.append(remote.pushkey('obsolete', key, '', data))
1927 rslts.append(remote.pushkey('obsolete', key, '', data))
1916 if [r for r in rslts if not r]:
1928 if [r for r in rslts if not r]:
1917 msg = _('failed to push some obsolete markers!\n')
1929 msg = _('failed to push some obsolete markers!\n')
1918 self.ui.warn(msg)
1930 self.ui.warn(msg)
1919 finally:
1931 finally:
1920 if lock is not None:
1932 if lock is not None:
1921 lock.release()
1933 lock.release()
1922 finally:
1934 finally:
1923 locallock.release()
1935 locallock.release()
1924
1936
1925 self.ui.debug("checking for updated bookmarks\n")
1937 self.ui.debug("checking for updated bookmarks\n")
1926 rb = remote.listkeys('bookmarks')
1938 rb = remote.listkeys('bookmarks')
1927 for k in rb.keys():
1939 for k in rb.keys():
1928 if k in unfi._bookmarks:
1940 if k in unfi._bookmarks:
1929 nr, nl = rb[k], hex(self._bookmarks[k])
1941 nr, nl = rb[k], hex(self._bookmarks[k])
1930 if nr in unfi:
1942 if nr in unfi:
1931 cr = unfi[nr]
1943 cr = unfi[nr]
1932 cl = unfi[nl]
1944 cl = unfi[nl]
1933 if bookmarks.validdest(unfi, cr, cl):
1945 if bookmarks.validdest(unfi, cr, cl):
1934 r = remote.pushkey('bookmarks', k, nr, nl)
1946 r = remote.pushkey('bookmarks', k, nr, nl)
1935 if r:
1947 if r:
1936 self.ui.status(_("updating bookmark %s\n") % k)
1948 self.ui.status(_("updating bookmark %s\n") % k)
1937 else:
1949 else:
1938 self.ui.warn(_('updating bookmark %s'
1950 self.ui.warn(_('updating bookmark %s'
1939 ' failed!\n') % k)
1951 ' failed!\n') % k)
1940
1952
1941 return ret
1953 return ret
1942
1954
1943 def changegroupinfo(self, nodes, source):
1955 def changegroupinfo(self, nodes, source):
1944 if self.ui.verbose or source == 'bundle':
1956 if self.ui.verbose or source == 'bundle':
1945 self.ui.status(_("%d changesets found\n") % len(nodes))
1957 self.ui.status(_("%d changesets found\n") % len(nodes))
1946 if self.ui.debugflag:
1958 if self.ui.debugflag:
1947 self.ui.debug("list of changesets:\n")
1959 self.ui.debug("list of changesets:\n")
1948 for node in nodes:
1960 for node in nodes:
1949 self.ui.debug("%s\n" % hex(node))
1961 self.ui.debug("%s\n" % hex(node))
1950
1962
1951 def changegroupsubset(self, bases, heads, source):
1963 def changegroupsubset(self, bases, heads, source):
1952 """Compute a changegroup consisting of all the nodes that are
1964 """Compute a changegroup consisting of all the nodes that are
1953 descendants of any of the bases and ancestors of any of the heads.
1965 descendants of any of the bases and ancestors of any of the heads.
1954 Return a chunkbuffer object whose read() method will return
1966 Return a chunkbuffer object whose read() method will return
1955 successive changegroup chunks.
1967 successive changegroup chunks.
1956
1968
1957 It is fairly complex as determining which filenodes and which
1969 It is fairly complex as determining which filenodes and which
1958 manifest nodes need to be included for the changeset to be complete
1970 manifest nodes need to be included for the changeset to be complete
1959 is non-trivial.
1971 is non-trivial.
1960
1972
1961 Another wrinkle is doing the reverse, figuring out which changeset in
1973 Another wrinkle is doing the reverse, figuring out which changeset in
1962 the changegroup a particular filenode or manifestnode belongs to.
1974 the changegroup a particular filenode or manifestnode belongs to.
1963 """
1975 """
1964 cl = self.changelog
1976 cl = self.changelog
1965 if not bases:
1977 if not bases:
1966 bases = [nullid]
1978 bases = [nullid]
1967 csets, bases, heads = cl.nodesbetween(bases, heads)
1979 csets, bases, heads = cl.nodesbetween(bases, heads)
1968 # We assume that all ancestors of bases are known
1980 # We assume that all ancestors of bases are known
1969 common = cl.ancestors([cl.rev(n) for n in bases])
1981 common = cl.ancestors([cl.rev(n) for n in bases])
1970 return self._changegroupsubset(common, csets, heads, source)
1982 return self._changegroupsubset(common, csets, heads, source)
1971
1983
1972 def getlocalbundle(self, source, outgoing):
1984 def getlocalbundle(self, source, outgoing):
1973 """Like getbundle, but taking a discovery.outgoing as an argument.
1985 """Like getbundle, but taking a discovery.outgoing as an argument.
1974
1986
1975 This is only implemented for local repos and reuses potentially
1987 This is only implemented for local repos and reuses potentially
1976 precomputed sets in outgoing."""
1988 precomputed sets in outgoing."""
1977 if not outgoing.missing:
1989 if not outgoing.missing:
1978 return None
1990 return None
1979 return self._changegroupsubset(outgoing.common,
1991 return self._changegroupsubset(outgoing.common,
1980 outgoing.missing,
1992 outgoing.missing,
1981 outgoing.missingheads,
1993 outgoing.missingheads,
1982 source)
1994 source)
1983
1995
1984 def getbundle(self, source, heads=None, common=None):
1996 def getbundle(self, source, heads=None, common=None):
1985 """Like changegroupsubset, but returns the set difference between the
1997 """Like changegroupsubset, but returns the set difference between the
1986 ancestors of heads and the ancestors common.
1998 ancestors of heads and the ancestors common.
1987
1999
1988 If heads is None, use the local heads. If common is None, use [nullid].
2000 If heads is None, use the local heads. If common is None, use [nullid].
1989
2001
1990 The nodes in common might not all be known locally due to the way the
2002 The nodes in common might not all be known locally due to the way the
1991 current discovery protocol works.
2003 current discovery protocol works.
1992 """
2004 """
1993 cl = self.changelog
2005 cl = self.changelog
1994 if common:
2006 if common:
1995 hasnode = cl.hasnode
2007 hasnode = cl.hasnode
1996 common = [n for n in common if hasnode(n)]
2008 common = [n for n in common if hasnode(n)]
1997 else:
2009 else:
1998 common = [nullid]
2010 common = [nullid]
1999 if not heads:
2011 if not heads:
2000 heads = cl.heads()
2012 heads = cl.heads()
2001 return self.getlocalbundle(source,
2013 return self.getlocalbundle(source,
2002 discovery.outgoing(cl, common, heads))
2014 discovery.outgoing(cl, common, heads))
2003
2015
    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a bundle10 changegroup stream for the changesets `csets`.

        The receiver is assumed to already have every revision whose rev
        number is in `commonrevs`; such nodes are pruned from the manifest
        and filelog groups.  Returns a changegroup.unbundle10 wrapping a
        lazily-generated chunk stream.
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # NOTE: fstate and count are mutable cells shared with the lookup()
        # and gengroup() closures below; they carry per-phase progress state.
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            # sending everything up to our heads: no pruning needed
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            # keep only nodes whose introducing changeset is outside commonrevs
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            # Map a node to its owning changelog node, collecting the
            # manifests/filenodes needed by later phases as a side effect.
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2111
2123
2112 def changegroup(self, basenodes, source):
2124 def changegroup(self, basenodes, source):
2113 # to avoid a race we use changegroupsubset() (issue1320)
2125 # to avoid a race we use changegroupsubset() (issue1320)
2114 return self.changegroupsubset(basenodes, self.heads(), source)
2126 return self.changegroupsubset(basenodes, self.heads(), source)
2115
2127
    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}  # changeset collection, used to collect manifests
        changedfiles = set()
        # fstate and count are mutable cells shared with the closures below
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            # all nodes of `log` whose introducing changeset is being sent
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            # Map a node to its owning changelog node; for the changelog
            # itself this also collects changed files and needed manifests.
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            # empty chunk signals the end of the stream
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2209
2221
    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # linkrev callback: next changeset lands at the current tip
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            # weak proxy so holding trp does not delay transaction teardown
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                # chunk-level progress callback handed to the bundle source
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            # efiles becomes a count from here on (progress total for files)
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    # no more filelog groups in the stream
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    # tick off the filenodes the validation pass demanded
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything left in needfiles was promised by a manifest but
            # never delivered: abort rather than store a broken repo
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    # heads that close a branch don't count as "new heads"
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                # expose pending (not yet finalized) changelog data to hooks
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old server can not push the boundary themself.
                # New server won't push the boundary if changeset already
                # existed locally as secrete
                #
                # We should not use added here but the list of all change in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alter behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but coming call to
                    # `destroyed` will repair it.
                    # In other case we can safely update cache on disk.
                    branchmap.updatecache(self)
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                # hooks run only after the repo lock is released
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
2406
2418
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from `remote`.

        Parses the stream_out wire protocol (status line, then
        "<total_files> <total_bytes>", then per-file "name\\0size" headers
        followed by raw data), writes each file into the local store,
        merges `requirements` into the repo requirements, and seeds the
        branch cache from the remote branchmap when available.  Returns
        len(self.heads()) + 1, mirroring the pull return convention.
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                # guard against zero/negative wall-clock delta
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            # new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    # tip of the cache is the highest remote branch head
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    self._branchcaches[None] = cache
                    cache.write(self.unfiltered())
            # drop all cached state: the store changed under us
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2493
2505
2494 def clone(self, remote, heads=[], stream=False):
2506 def clone(self, remote, heads=[], stream=False):
2495 '''clone remote repository.
2507 '''clone remote repository.
2496
2508
2497 keyword arguments:
2509 keyword arguments:
2498 heads: list of revs to clone (forces use of pull)
2510 heads: list of revs to clone (forces use of pull)
2499 stream: use streaming clone if possible'''
2511 stream: use streaming clone if possible'''
2500
2512
2501 # now, all clients that can request uncompressed clones can
2513 # now, all clients that can request uncompressed clones can
2502 # read repo formats supported by all servers that can serve
2514 # read repo formats supported by all servers that can serve
2503 # them.
2515 # them.
2504
2516
2505 # if revlog format changes, client will have to check version
2517 # if revlog format changes, client will have to check version
2506 # and format flags on "stream" capability, and use
2518 # and format flags on "stream" capability, and use
2507 # uncompressed only if compatible.
2519 # uncompressed only if compatible.
2508
2520
2509 if not stream:
2521 if not stream:
2510 # if the server explicitly prefers to stream (for fast LANs)
2522 # if the server explicitly prefers to stream (for fast LANs)
2511 stream = remote.capable('stream-preferred')
2523 stream = remote.capable('stream-preferred')
2512
2524
2513 if stream and not heads:
2525 if stream and not heads:
2514 # 'stream' means remote revlog format is revlogv1 only
2526 # 'stream' means remote revlog format is revlogv1 only
2515 if remote.capable('stream'):
2527 if remote.capable('stream'):
2516 return self.stream_in(remote, set(('revlogv1',)))
2528 return self.stream_in(remote, set(('revlogv1',)))
2517 # otherwise, 'streamreqs' contains the remote revlog format
2529 # otherwise, 'streamreqs' contains the remote revlog format
2518 streamreqs = remote.capable('streamreqs')
2530 streamreqs = remote.capable('streamreqs')
2519 if streamreqs:
2531 if streamreqs:
2520 streamreqs = set(streamreqs.split(','))
2532 streamreqs = set(streamreqs.split(','))
2521 # if we support it, stream in and adjust our requirements
2533 # if we support it, stream in and adjust our requirements
2522 if not streamreqs - self.supportedformats:
2534 if not streamreqs - self.supportedformats:
2523 return self.stream_in(remote, streamreqs)
2535 return self.stream_in(remote, streamreqs)
2524 return self.pull(remote, heads)
2536 return self.pull(remote, heads)
2525
2537
2526 def pushkey(self, namespace, key, old, new):
2538 def pushkey(self, namespace, key, old, new):
2527 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2539 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2528 old=old, new=new)
2540 old=old, new=new)
2529 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2541 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2530 ret = pushkey.push(self, namespace, key, old, new)
2542 ret = pushkey.push(self, namespace, key, old, new)
2531 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2543 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2532 ret=ret)
2544 ret=ret)
2533 return ret
2545 return ret
2534
2546
2535 def listkeys(self, namespace):
2547 def listkeys(self, namespace):
2536 self.hook('prelistkeys', throw=True, namespace=namespace)
2548 self.hook('prelistkeys', throw=True, namespace=namespace)
2537 self.ui.debug('listing keys for "%s"\n' % namespace)
2549 self.ui.debug('listing keys for "%s"\n' % namespace)
2538 values = pushkey.list(self, namespace)
2550 values = pushkey.list(self, namespace)
2539 self.hook('listkeys', namespace=namespace, values=values)
2551 self.hook('listkeys', namespace=namespace, values=values)
2540 return values
2552 return values
2541
2553
2542 def debugwireargs(self, one, two, three=None, four=None, five=None):
2554 def debugwireargs(self, one, two, three=None, four=None, five=None):
2543 '''used to test argument passing over the wire'''
2555 '''used to test argument passing over the wire'''
2544 return "%s %s %s %s %s" % (one, two, three, four, five)
2556 return "%s %s %s %s %s" % (one, two, three, four, five)
2545
2557
2546 def savecommitmessage(self, text):
2558 def savecommitmessage(self, text):
2547 fp = self.opener('last-message.txt', 'wb')
2559 fp = self.opener('last-message.txt', 'wb')
2548 try:
2560 try:
2549 fp.write(text)
2561 fp.write(text)
2550 finally:
2562 finally:
2551 fp.close()
2563 fp.close()
2552 return self.pathto(fp.name[len(self.root) + 1:])
2564 return self.pathto(fp.name[len(self.root) + 1:])
2553
2565
2554 # used to avoid circular references so destructors work
2566 # used to avoid circular references so destructors work
2555 def aftertrans(files):
2567 def aftertrans(files):
2556 renamefiles = [tuple(t) for t in files]
2568 renamefiles = [tuple(t) for t in files]
2557 def a():
2569 def a():
2558 for src, dest in renamefiles:
2570 for src, dest in renamefiles:
2559 try:
2571 try:
2560 util.rename(src, dest)
2572 util.rename(src, dest)
2561 except OSError: # journal file does not yet exist
2573 except OSError: # journal file does not yet exist
2562 pass
2574 pass
2563 return a
2575 return a
2564
2576
2565 def undoname(fn):
2577 def undoname(fn):
2566 base, name = os.path.split(fn)
2578 base, name = os.path.split(fn)
2567 assert name.startswith('journal')
2579 assert name.startswith('journal')
2568 return os.path.join(base, name.replace('journal', 'undo', 1))
2580 return os.path.join(base, name.replace('journal', 'undo', 1))
2569
2581
2570 def instance(ui, path, create):
2582 def instance(ui, path, create):
2571 return localrepository(ui, util.urllocalpath(path), create)
2583 return localrepository(ui, util.urllocalpath(path), create)
2572
2584
2573 def islocal(path):
2585 def islocal(path):
2574 return True
2586 return True
@@ -1,1139 +1,1137 b''
1 $ cat <<EOF >> $HGRCPATH
1 $ cat <<EOF >> $HGRCPATH
2 > [extensions]
2 > [extensions]
3 > keyword =
3 > keyword =
4 > mq =
4 > mq =
5 > notify =
5 > notify =
6 > record =
6 > record =
7 > transplant =
7 > transplant =
8 > [ui]
8 > [ui]
9 > interactive = true
9 > interactive = true
10 > EOF
10 > EOF
11
11
12 hide outer repo
12 hide outer repo
13 $ hg init
13 $ hg init
14
14
15 Run kwdemo before [keyword] files are set up
15 Run kwdemo before [keyword] files are set up
16 as it would succeed without uisetup otherwise
16 as it would succeed without uisetup otherwise
17
17
18 $ hg --quiet kwdemo
18 $ hg --quiet kwdemo
19 [extensions]
19 [extensions]
20 keyword =
20 keyword =
21 [keyword]
21 [keyword]
22 demo.txt =
22 demo.txt =
23 [keywordset]
23 [keywordset]
24 svn = False
24 svn = False
25 [keywordmaps]
25 [keywordmaps]
26 Author = {author|user}
26 Author = {author|user}
27 Date = {date|utcdate}
27 Date = {date|utcdate}
28 Header = {root}/{file},v {node|short} {date|utcdate} {author|user}
28 Header = {root}/{file},v {node|short} {date|utcdate} {author|user}
29 Id = {file|basename},v {node|short} {date|utcdate} {author|user}
29 Id = {file|basename},v {node|short} {date|utcdate} {author|user}
30 RCSFile = {file|basename},v
30 RCSFile = {file|basename},v
31 RCSfile = {file|basename},v
31 RCSfile = {file|basename},v
32 Revision = {node|short}
32 Revision = {node|short}
33 Source = {root}/{file},v
33 Source = {root}/{file},v
34 $Author: test $
34 $Author: test $
35 $Date: ????/??/?? ??:??:?? $ (glob)
35 $Date: ????/??/?? ??:??:?? $ (glob)
36 $Header: */demo.txt,v ???????????? ????/??/?? ??:??:?? test $ (glob)
36 $Header: */demo.txt,v ???????????? ????/??/?? ??:??:?? test $ (glob)
37 $Id: demo.txt,v ???????????? ????/??/?? ??:??:?? test $ (glob)
37 $Id: demo.txt,v ???????????? ????/??/?? ??:??:?? test $ (glob)
38 $RCSFile: demo.txt,v $
38 $RCSFile: demo.txt,v $
39 $RCSfile: demo.txt,v $
39 $RCSfile: demo.txt,v $
40 $Revision: ???????????? $ (glob)
40 $Revision: ???????????? $ (glob)
41 $Source: */demo.txt,v $ (glob)
41 $Source: */demo.txt,v $ (glob)
42
42
43 $ hg --quiet kwdemo "Branch = {branches}"
43 $ hg --quiet kwdemo "Branch = {branches}"
44 [extensions]
44 [extensions]
45 keyword =
45 keyword =
46 [keyword]
46 [keyword]
47 demo.txt =
47 demo.txt =
48 [keywordset]
48 [keywordset]
49 svn = False
49 svn = False
50 [keywordmaps]
50 [keywordmaps]
51 Branch = {branches}
51 Branch = {branches}
52 $Branch: demobranch $
52 $Branch: demobranch $
53
53
54 $ cat <<EOF >> $HGRCPATH
54 $ cat <<EOF >> $HGRCPATH
55 > [keyword]
55 > [keyword]
56 > ** =
56 > ** =
57 > b = ignore
57 > b = ignore
58 > i = ignore
58 > i = ignore
59 > [hooks]
59 > [hooks]
60 > EOF
60 > EOF
61 $ cp $HGRCPATH $HGRCPATH.nohooks
61 $ cp $HGRCPATH $HGRCPATH.nohooks
62 > cat <<EOF >> $HGRCPATH
62 > cat <<EOF >> $HGRCPATH
63 > commit=
63 > commit=
64 > commit.test=cp a hooktest
64 > commit.test=cp a hooktest
65 > EOF
65 > EOF
66
66
67 $ hg init Test-bndl
67 $ hg init Test-bndl
68 $ cd Test-bndl
68 $ cd Test-bndl
69
69
70 kwshrink should exit silently in empty/invalid repo
70 kwshrink should exit silently in empty/invalid repo
71
71
72 $ hg kwshrink
72 $ hg kwshrink
73
73
74 Symlinks cannot be created on Windows.
74 Symlinks cannot be created on Windows.
75 A bundle to test this was made with:
75 A bundle to test this was made with:
76 hg init t
76 hg init t
77 cd t
77 cd t
78 echo a > a
78 echo a > a
79 ln -s a sym
79 ln -s a sym
80 hg add sym
80 hg add sym
81 hg ci -m addsym -u mercurial
81 hg ci -m addsym -u mercurial
82 hg bundle --base null ../test-keyword.hg
82 hg bundle --base null ../test-keyword.hg
83
83
84 $ hg pull -u "$TESTDIR"/bundles/test-keyword.hg
84 $ hg pull -u "$TESTDIR"/bundles/test-keyword.hg
85 pulling from *test-keyword.hg (glob)
85 pulling from *test-keyword.hg (glob)
86 requesting all changes
86 requesting all changes
87 adding changesets
87 adding changesets
88 adding manifests
88 adding manifests
89 adding file changes
89 adding file changes
90 added 1 changesets with 1 changes to 1 files
90 added 1 changesets with 1 changes to 1 files
91 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
91 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
92
92
93 $ echo 'expand $Id$' > a
93 $ echo 'expand $Id$' > a
94 $ echo 'do not process $Id:' >> a
94 $ echo 'do not process $Id:' >> a
95 $ echo 'xxx $' >> a
95 $ echo 'xxx $' >> a
96 $ echo 'ignore $Id$' > b
96 $ echo 'ignore $Id$' > b
97
97
98 Output files as they were created
98 Output files as they were created
99
99
100 $ cat a b
100 $ cat a b
101 expand $Id$
101 expand $Id$
102 do not process $Id:
102 do not process $Id:
103 xxx $
103 xxx $
104 ignore $Id$
104 ignore $Id$
105
105
106 no kwfiles
106 no kwfiles
107
107
108 $ hg kwfiles
108 $ hg kwfiles
109
109
110 untracked candidates
110 untracked candidates
111
111
112 $ hg -v kwfiles --unknown
112 $ hg -v kwfiles --unknown
113 k a
113 k a
114
114
115 Add files and check status
115 Add files and check status
116
116
117 $ hg addremove
117 $ hg addremove
118 adding a
118 adding a
119 adding b
119 adding b
120 $ hg status
120 $ hg status
121 A a
121 A a
122 A b
122 A b
123
123
124
124
125 Default keyword expansion including commit hook
125 Default keyword expansion including commit hook
126 Interrupted commit should not change state or run commit hook
126 Interrupted commit should not change state or run commit hook
127
127
128 $ hg --debug commit
128 $ hg --debug commit
129 abort: empty commit message
129 abort: empty commit message
130 [255]
130 [255]
131 $ hg status
131 $ hg status
132 A a
132 A a
133 A b
133 A b
134
134
135 Commit with several checks
135 Commit with several checks
136
136
137 $ hg --debug commit -mabsym -u 'User Name <user@example.com>'
137 $ hg --debug commit -mabsym -u 'User Name <user@example.com>'
138 a
138 a
139 b
139 b
140 overwriting a expanding keywords
140 overwriting a expanding keywords
141 running hook commit.test: cp a hooktest
141 running hook commit.test: cp a hooktest
142 committed changeset 1:ef63ca68695bc9495032c6fda1350c71e6d256e9
142 committed changeset 1:ef63ca68695bc9495032c6fda1350c71e6d256e9
143 $ hg status
143 $ hg status
144 ? hooktest
144 ? hooktest
145 $ hg debugrebuildstate
145 $ hg debugrebuildstate
146 $ hg --quiet identify
146 $ hg --quiet identify
147 ef63ca68695b
147 ef63ca68695b
148
148
149 cat files in working directory with keywords expanded
149 cat files in working directory with keywords expanded
150
150
151 $ cat a b
151 $ cat a b
152 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
152 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
153 do not process $Id:
153 do not process $Id:
154 xxx $
154 xxx $
155 ignore $Id$
155 ignore $Id$
156
156
157 hg cat files and symlink, no expansion
157 hg cat files and symlink, no expansion
158
158
159 $ hg cat sym a b && echo
159 $ hg cat sym a b && echo
160 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
160 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
161 do not process $Id:
161 do not process $Id:
162 xxx $
162 xxx $
163 ignore $Id$
163 ignore $Id$
164 a
164 a
165
165
166 $ diff a hooktest
166 $ diff a hooktest
167
167
168 $ cp $HGRCPATH.nohooks $HGRCPATH
168 $ cp $HGRCPATH.nohooks $HGRCPATH
169 $ rm hooktest
169 $ rm hooktest
170
170
171 hg status of kw-ignored binary file starting with '\1\n'
171 hg status of kw-ignored binary file starting with '\1\n'
172
172
173 >>> open("i", "wb").write("\1\nfoo")
173 >>> open("i", "wb").write("\1\nfoo")
174 $ hg -q commit -Am metasep i
174 $ hg -q commit -Am metasep i
175 $ hg status
175 $ hg status
176 >>> open("i", "wb").write("\1\nbar")
176 >>> open("i", "wb").write("\1\nbar")
177 $ hg status
177 $ hg status
178 M i
178 M i
179 $ hg -q commit -m "modify metasep" i
179 $ hg -q commit -m "modify metasep" i
180 $ hg status --rev 2:3
180 $ hg status --rev 2:3
181 M i
181 M i
182 $ touch empty
182 $ touch empty
183 $ hg -q commit -A -m "another file"
183 $ hg -q commit -A -m "another file"
184 $ hg status -A --rev 3:4 i
184 $ hg status -A --rev 3:4 i
185 C i
185 C i
186
186
187 $ hg -q strip -n 2
187 $ hg -q strip -n 2
188
188
189 Test hook execution
189 Test hook execution
190
190
191 bundle
191 bundle
192
192
193 $ hg bundle --base null ../kw.hg
193 $ hg bundle --base null ../kw.hg
194 2 changesets found
194 2 changesets found
195 $ cd ..
195 $ cd ..
196 $ hg init Test
196 $ hg init Test
197 $ cd Test
197 $ cd Test
198
198
199 Notify on pull to check whether keywords stay as is in email
199 Notify on pull to check whether keywords stay as is in email
200 ie. if patch.diff wrapper acts as it should
200 ie. if patch.diff wrapper acts as it should
201
201
202 $ cat <<EOF >> $HGRCPATH
202 $ cat <<EOF >> $HGRCPATH
203 > [hooks]
203 > [hooks]
204 > incoming.notify = python:hgext.notify.hook
204 > incoming.notify = python:hgext.notify.hook
205 > [notify]
205 > [notify]
206 > sources = pull
206 > sources = pull
207 > diffstat = False
207 > diffstat = False
208 > maxsubject = 15
208 > maxsubject = 15
209 > [reposubs]
209 > [reposubs]
210 > * = Test
210 > * = Test
211 > EOF
211 > EOF
212
212
213 Pull from bundle and trigger notify
213 Pull from bundle and trigger notify
214
214
215 $ hg pull -u ../kw.hg
215 $ hg pull -u ../kw.hg
216 pulling from ../kw.hg
216 pulling from ../kw.hg
217 requesting all changes
217 requesting all changes
218 adding changesets
218 adding changesets
219 adding manifests
219 adding manifests
220 adding file changes
220 adding file changes
221 added 2 changesets with 3 changes to 3 files
221 added 2 changesets with 3 changes to 3 files
222 Content-Type: text/plain; charset="us-ascii"
222 Content-Type: text/plain; charset="us-ascii"
223 MIME-Version: 1.0
223 MIME-Version: 1.0
224 Content-Transfer-Encoding: 7bit
224 Content-Transfer-Encoding: 7bit
225 Date: * (glob)
225 Date: * (glob)
226 Subject: changeset in...
226 Subject: changeset in...
227 From: mercurial
227 From: mercurial
228 X-Hg-Notification: changeset a2392c293916
228 X-Hg-Notification: changeset a2392c293916
229 Message-Id: <hg.a2392c293916*> (glob)
229 Message-Id: <hg.a2392c293916*> (glob)
230 To: Test
230 To: Test
231
231
232 changeset a2392c293916 in $TESTTMP/Test (glob)
232 changeset a2392c293916 in $TESTTMP/Test (glob)
233 details: $TESTTMP/Test?cmd=changeset;node=a2392c293916
233 details: $TESTTMP/Test?cmd=changeset;node=a2392c293916
234 description:
234 description:
235 addsym
235 addsym
236
236
237 diffs (6 lines):
237 diffs (6 lines):
238
238
239 diff -r 000000000000 -r a2392c293916 sym
239 diff -r 000000000000 -r a2392c293916 sym
240 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
240 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
241 +++ b/sym Sat Feb 09 20:25:47 2008 +0100
241 +++ b/sym Sat Feb 09 20:25:47 2008 +0100
242 @@ -0,0 +1,1 @@
242 @@ -0,0 +1,1 @@
243 +a
243 +a
244 \ No newline at end of file
244 \ No newline at end of file
245 Content-Type: text/plain; charset="us-ascii"
245 Content-Type: text/plain; charset="us-ascii"
246 MIME-Version: 1.0
246 MIME-Version: 1.0
247 Content-Transfer-Encoding: 7bit
247 Content-Transfer-Encoding: 7bit
248 Date:* (glob)
248 Date:* (glob)
249 Subject: changeset in...
249 Subject: changeset in...
250 From: User Name <user@example.com>
250 From: User Name <user@example.com>
251 X-Hg-Notification: changeset ef63ca68695b
251 X-Hg-Notification: changeset ef63ca68695b
252 Message-Id: <hg.ef63ca68695b*> (glob)
252 Message-Id: <hg.ef63ca68695b*> (glob)
253 To: Test
253 To: Test
254
254
255 changeset ef63ca68695b in $TESTTMP/Test (glob)
255 changeset ef63ca68695b in $TESTTMP/Test (glob)
256 details: $TESTTMP/Test?cmd=changeset;node=ef63ca68695b
256 details: $TESTTMP/Test?cmd=changeset;node=ef63ca68695b
257 description:
257 description:
258 absym
258 absym
259
259
260 diffs (12 lines):
260 diffs (12 lines):
261
261
262 diff -r a2392c293916 -r ef63ca68695b a
262 diff -r a2392c293916 -r ef63ca68695b a
263 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
263 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
264 +++ b/a Thu Jan 01 00:00:00 1970 +0000
264 +++ b/a Thu Jan 01 00:00:00 1970 +0000
265 @@ -0,0 +1,3 @@
265 @@ -0,0 +1,3 @@
266 +expand $Id$
266 +expand $Id$
267 +do not process $Id:
267 +do not process $Id:
268 +xxx $
268 +xxx $
269 diff -r a2392c293916 -r ef63ca68695b b
269 diff -r a2392c293916 -r ef63ca68695b b
270 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
270 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
271 +++ b/b Thu Jan 01 00:00:00 1970 +0000
271 +++ b/b Thu Jan 01 00:00:00 1970 +0000
272 @@ -0,0 +1,1 @@
272 @@ -0,0 +1,1 @@
273 +ignore $Id$
273 +ignore $Id$
274 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
274 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
275
275
276 $ cp $HGRCPATH.nohooks $HGRCPATH
276 $ cp $HGRCPATH.nohooks $HGRCPATH
277
277
278 Touch files and check with status
278 Touch files and check with status
279
279
280 $ touch a b
280 $ touch a b
281 $ hg status
281 $ hg status
282
282
283 Update and expand
283 Update and expand
284
284
285 $ rm sym a b
285 $ rm sym a b
286 $ hg update -C
286 $ hg update -C
287 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
287 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
288 $ cat a b
288 $ cat a b
289 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
289 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
290 do not process $Id:
290 do not process $Id:
291 xxx $
291 xxx $
292 ignore $Id$
292 ignore $Id$
293
293
294 Check whether expansion is filewise and file mode is preserved
294 Check whether expansion is filewise and file mode is preserved
295
295
296 $ echo '$Id$' > c
296 $ echo '$Id$' > c
297 $ echo 'tests for different changenodes' >> c
297 $ echo 'tests for different changenodes' >> c
298 #if unix-permissions
298 #if unix-permissions
299 $ chmod 600 c
299 $ chmod 600 c
300 $ ls -l c | cut -b 1-10
300 $ ls -l c | cut -b 1-10
301 -rw-------
301 -rw-------
302 #endif
302 #endif
303
303
304 commit file c
304 commit file c
305
305
306 $ hg commit -A -mcndiff -d '1 0' -u 'User Name <user@example.com>'
306 $ hg commit -A -mcndiff -d '1 0' -u 'User Name <user@example.com>'
307 adding c
307 adding c
308 #if unix-permissions
308 #if unix-permissions
309 $ ls -l c | cut -b 1-10
309 $ ls -l c | cut -b 1-10
310 -rw-------
310 -rw-------
311 #endif
311 #endif
312
312
313 force expansion
313 force expansion
314
314
315 $ hg -v kwexpand
315 $ hg -v kwexpand
316 overwriting a expanding keywords
316 overwriting a expanding keywords
317 overwriting c expanding keywords
317 overwriting c expanding keywords
318
318
319 compare changenodes in a and c
319 compare changenodes in a and c
320
320
321 $ cat a c
321 $ cat a c
322 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
322 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
323 do not process $Id:
323 do not process $Id:
324 xxx $
324 xxx $
325 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
325 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
326 tests for different changenodes
326 tests for different changenodes
327
327
328 record
328 record
329
329
330 $ echo '$Id$' > r
330 $ echo '$Id$' > r
331 $ hg add r
331 $ hg add r
332
332
333 record chunk
333 record chunk
334
334
335 >>> lines = open('a', 'rb').readlines()
335 >>> lines = open('a', 'rb').readlines()
336 >>> lines.insert(1, 'foo\n')
336 >>> lines.insert(1, 'foo\n')
337 >>> lines.append('bar\n')
337 >>> lines.append('bar\n')
338 >>> open('a', 'wb').writelines(lines)
338 >>> open('a', 'wb').writelines(lines)
339 $ hg record -d '10 1' -m rectest a<<EOF
339 $ hg record -d '10 1' -m rectest a<<EOF
340 > y
340 > y
341 > y
341 > y
342 > n
342 > n
343 > EOF
343 > EOF
344 diff --git a/a b/a
344 diff --git a/a b/a
345 2 hunks, 2 lines changed
345 2 hunks, 2 lines changed
346 examine changes to 'a'? [Ynesfdaq?]
346 examine changes to 'a'? [Ynesfdaq?]
347 @@ -1,3 +1,4 @@
347 @@ -1,3 +1,4 @@
348 expand $Id$
348 expand $Id$
349 +foo
349 +foo
350 do not process $Id:
350 do not process $Id:
351 xxx $
351 xxx $
352 record change 1/2 to 'a'? [Ynesfdaq?]
352 record change 1/2 to 'a'? [Ynesfdaq?]
353 @@ -2,2 +3,3 @@
353 @@ -2,2 +3,3 @@
354 do not process $Id:
354 do not process $Id:
355 xxx $
355 xxx $
356 +bar
356 +bar
357 record change 2/2 to 'a'? [Ynesfdaq?]
357 record change 2/2 to 'a'? [Ynesfdaq?]
358
358
359 $ hg identify
359 $ hg identify
360 5f5eb23505c3+ tip
360 5f5eb23505c3+ tip
361 $ hg status
361 $ hg status
362 M a
362 M a
363 A r
363 A r
364
364
365 Cat modified file a
365 Cat modified file a
366
366
367 $ cat a
367 $ cat a
368 expand $Id: a,v 5f5eb23505c3 1970/01/01 00:00:10 test $
368 expand $Id: a,v 5f5eb23505c3 1970/01/01 00:00:10 test $
369 foo
369 foo
370 do not process $Id:
370 do not process $Id:
371 xxx $
371 xxx $
372 bar
372 bar
373
373
374 Diff remaining chunk
374 Diff remaining chunk
375
375
376 $ hg diff a
376 $ hg diff a
377 diff -r 5f5eb23505c3 a
377 diff -r 5f5eb23505c3 a
378 --- a/a Thu Jan 01 00:00:09 1970 -0000
378 --- a/a Thu Jan 01 00:00:09 1970 -0000
379 +++ b/a * (glob)
379 +++ b/a * (glob)
380 @@ -2,3 +2,4 @@
380 @@ -2,3 +2,4 @@
381 foo
381 foo
382 do not process $Id:
382 do not process $Id:
383 xxx $
383 xxx $
384 +bar
384 +bar
385
385
386 $ hg rollback
386 $ hg rollback
387 repository tip rolled back to revision 2 (undo commit)
387 repository tip rolled back to revision 2 (undo commit)
388 working directory now based on revision 2
388 working directory now based on revision 2
389
389
390 Record all chunks in file a
390 Record all chunks in file a
391
391
392 $ echo foo > msg
392 $ echo foo > msg
393
393
394 - do not use "hg record -m" here!
394 - do not use "hg record -m" here!
395
395
396 $ hg record -l msg -d '11 1' a<<EOF
396 $ hg record -l msg -d '11 1' a<<EOF
397 > y
397 > y
398 > y
398 > y
399 > y
399 > y
400 > EOF
400 > EOF
401 diff --git a/a b/a
401 diff --git a/a b/a
402 2 hunks, 2 lines changed
402 2 hunks, 2 lines changed
403 examine changes to 'a'? [Ynesfdaq?]
403 examine changes to 'a'? [Ynesfdaq?]
404 @@ -1,3 +1,4 @@
404 @@ -1,3 +1,4 @@
405 expand $Id$
405 expand $Id$
406 +foo
406 +foo
407 do not process $Id:
407 do not process $Id:
408 xxx $
408 xxx $
409 record change 1/2 to 'a'? [Ynesfdaq?]
409 record change 1/2 to 'a'? [Ynesfdaq?]
410 @@ -2,2 +3,3 @@
410 @@ -2,2 +3,3 @@
411 do not process $Id:
411 do not process $Id:
412 xxx $
412 xxx $
413 +bar
413 +bar
414 record change 2/2 to 'a'? [Ynesfdaq?]
414 record change 2/2 to 'a'? [Ynesfdaq?]
415
415
416 File a should be clean
416 File a should be clean
417
417
418 $ hg status -A a
418 $ hg status -A a
419 C a
419 C a
420
420
421 rollback and revert expansion
421 rollback and revert expansion
422
422
423 $ cat a
423 $ cat a
424 expand $Id: a,v 78e0a02d76aa 1970/01/01 00:00:11 test $
424 expand $Id: a,v 78e0a02d76aa 1970/01/01 00:00:11 test $
425 foo
425 foo
426 do not process $Id:
426 do not process $Id:
427 xxx $
427 xxx $
428 bar
428 bar
429 $ hg --verbose rollback
429 $ hg --verbose rollback
430 repository tip rolled back to revision 2 (undo commit)
430 repository tip rolled back to revision 2 (undo commit)
431 working directory now based on revision 2
431 working directory now based on revision 2
432 overwriting a expanding keywords
432 overwriting a expanding keywords
433 $ hg status a
433 $ hg status a
434 M a
434 M a
435 $ cat a
435 $ cat a
436 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
436 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
437 foo
437 foo
438 do not process $Id:
438 do not process $Id:
439 xxx $
439 xxx $
440 bar
440 bar
441 $ echo '$Id$' > y
441 $ echo '$Id$' > y
442 $ echo '$Id$' > z
442 $ echo '$Id$' > z
443 $ hg add y
443 $ hg add y
444 $ hg commit -Am "rollback only" z
444 $ hg commit -Am "rollback only" z
445 $ cat z
445 $ cat z
446 $Id: z,v 45a5d3adce53 1970/01/01 00:00:00 test $
446 $Id: z,v 45a5d3adce53 1970/01/01 00:00:00 test $
447 $ hg --verbose rollback
447 $ hg --verbose rollback
448 repository tip rolled back to revision 2 (undo commit)
448 repository tip rolled back to revision 2 (undo commit)
449 working directory now based on revision 2
449 working directory now based on revision 2
450 overwriting z shrinking keywords
450 overwriting z shrinking keywords
451
451
452 Only z should be overwritten
452 Only z should be overwritten
453
453
454 $ hg status a y z
454 $ hg status a y z
455 M a
455 M a
456 A y
456 A y
457 A z
457 A z
458 $ cat z
458 $ cat z
459 $Id$
459 $Id$
460 $ hg forget y z
460 $ hg forget y z
461 $ rm y z
461 $ rm y z
462
462
463 record added file alone
463 record added file alone
464
464
465 $ hg -v record -l msg -d '12 2' r<<EOF
465 $ hg -v record -l msg -d '12 2' r<<EOF
466 > y
466 > y
467 > EOF
467 > EOF
468 diff --git a/r b/r
468 diff --git a/r b/r
469 new file mode 100644
469 new file mode 100644
470 examine changes to 'r'? [Ynesfdaq?]
470 examine changes to 'r'? [Ynesfdaq?]
471 r
471 r
472 committed changeset 3:82a2f715724d
472 committed changeset 3:82a2f715724d
473 overwriting r expanding keywords
473 overwriting r expanding keywords
474 - status call required for dirstate.normallookup() check
474 - status call required for dirstate.normallookup() check
475 $ hg status r
475 $ hg status r
476 $ hg --verbose rollback
476 $ hg --verbose rollback
477 repository tip rolled back to revision 2 (undo commit)
477 repository tip rolled back to revision 2 (undo commit)
478 working directory now based on revision 2
478 working directory now based on revision 2
479 overwriting r shrinking keywords
479 overwriting r shrinking keywords
480 $ hg forget r
480 $ hg forget r
481 $ rm msg r
481 $ rm msg r
482 $ hg update -C
482 $ hg update -C
483 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
483 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
484
484
485 record added keyword ignored file
485 record added keyword ignored file
486
486
487 $ echo '$Id$' > i
487 $ echo '$Id$' > i
488 $ hg add i
488 $ hg add i
489 $ hg --verbose record -d '13 1' -m recignored<<EOF
489 $ hg --verbose record -d '13 1' -m recignored<<EOF
490 > y
490 > y
491 > EOF
491 > EOF
492 diff --git a/i b/i
492 diff --git a/i b/i
493 new file mode 100644
493 new file mode 100644
494 examine changes to 'i'? [Ynesfdaq?]
494 examine changes to 'i'? [Ynesfdaq?]
495 i
495 i
496 committed changeset 3:9f40ceb5a072
496 committed changeset 3:9f40ceb5a072
497 $ cat i
497 $ cat i
498 $Id$
498 $Id$
499 $ hg -q rollback
499 $ hg -q rollback
500 $ hg forget i
500 $ hg forget i
501 $ rm i
501 $ rm i
502
502
503 amend
503 amend
504
504
505 $ echo amend >> a
505 $ echo amend >> a
506 $ echo amend >> b
506 $ echo amend >> b
507 $ hg -q commit -d '14 1' -m 'prepare amend'
507 $ hg -q commit -d '14 1' -m 'prepare amend'
508
508
509 $ hg --debug commit --amend -d '15 1' -m 'amend without changes' | grep keywords
509 $ hg --debug commit --amend -d '15 1' -m 'amend without changes' | grep keywords
510 overwriting a expanding keywords
510 overwriting a expanding keywords
511 $ hg -q id
511 $ hg -q id
512 67d8c481a6be
512 67d8c481a6be
513 $ head -1 a
513 $ head -1 a
514 expand $Id: a,v 67d8c481a6be 1970/01/01 00:00:15 test $
514 expand $Id: a,v 67d8c481a6be 1970/01/01 00:00:15 test $
515
515
516 $ hg -q strip -n tip
516 $ hg -q strip -n tip
517
517
518 Test patch queue repo
518 Test patch queue repo
519
519
520 $ hg init --mq
520 $ hg init --mq
521 $ hg qimport -r tip -n mqtest.diff
521 $ hg qimport -r tip -n mqtest.diff
522 $ hg commit --mq -m mqtest
522 $ hg commit --mq -m mqtest
523
523
524 Keywords should not be expanded in patch
524 Keywords should not be expanded in patch
525
525
526 $ cat .hg/patches/mqtest.diff
526 $ cat .hg/patches/mqtest.diff
527 # HG changeset patch
527 # HG changeset patch
528 # User User Name <user@example.com>
528 # User User Name <user@example.com>
529 # Date 1 0
529 # Date 1 0
530 # Node ID 40a904bbbe4cd4ab0a1f28411e35db26341a40ad
530 # Node ID 40a904bbbe4cd4ab0a1f28411e35db26341a40ad
531 # Parent ef63ca68695bc9495032c6fda1350c71e6d256e9
531 # Parent ef63ca68695bc9495032c6fda1350c71e6d256e9
532 cndiff
532 cndiff
533
533
534 diff -r ef63ca68695b -r 40a904bbbe4c c
534 diff -r ef63ca68695b -r 40a904bbbe4c c
535 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
535 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
536 +++ b/c Thu Jan 01 00:00:01 1970 +0000
536 +++ b/c Thu Jan 01 00:00:01 1970 +0000
537 @@ -0,0 +1,2 @@
537 @@ -0,0 +1,2 @@
538 +$Id$
538 +$Id$
539 +tests for different changenodes
539 +tests for different changenodes
540
540
541 $ hg qpop
541 $ hg qpop
542 popping mqtest.diff
542 popping mqtest.diff
543 patch queue now empty
543 patch queue now empty
544
544
545 qgoto, implying qpush, should expand
545 qgoto, implying qpush, should expand
546
546
547 $ hg qgoto mqtest.diff
547 $ hg qgoto mqtest.diff
548 applying mqtest.diff
548 applying mqtest.diff
549 now at: mqtest.diff
549 now at: mqtest.diff
550 $ cat c
550 $ cat c
551 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
551 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
552 tests for different changenodes
552 tests for different changenodes
553 $ hg cat c
553 $ hg cat c
554 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
554 $Id: c,v 40a904bbbe4c 1970/01/01 00:00:01 user $
555 tests for different changenodes
555 tests for different changenodes
556
556
557 Keywords should not be expanded in filelog
557 Keywords should not be expanded in filelog
558
558
559 $ hg --config 'extensions.keyword=!' cat c
559 $ hg --config 'extensions.keyword=!' cat c
560 $Id$
560 $Id$
561 tests for different changenodes
561 tests for different changenodes
562
562
563 qpop and move on
563 qpop and move on
564
564
565 $ hg qpop
565 $ hg qpop
566 popping mqtest.diff
566 popping mqtest.diff
567 patch queue now empty
567 patch queue now empty
568
568
569 Copy and show added kwfiles
569 Copy and show added kwfiles
570
570
571 $ hg cp a c
571 $ hg cp a c
572 $ hg kwfiles
572 $ hg kwfiles
573 a
573 a
574 c
574 c
575
575
576 Commit and show expansion in original and copy
576 Commit and show expansion in original and copy
577
577
578 $ hg --debug commit -ma2c -d '1 0' -u 'User Name <user@example.com>'
578 $ hg --debug commit -ma2c -d '1 0' -u 'User Name <user@example.com>'
579 c
579 c
580 c: copy a:0045e12f6c5791aac80ca6cbfd97709a88307292
580 c: copy a:0045e12f6c5791aac80ca6cbfd97709a88307292
581 removing unknown node 40a904bbbe4c from 1-phase boundary
582 overwriting c expanding keywords
581 overwriting c expanding keywords
583 committed changeset 2:25736cf2f5cbe41f6be4e6784ef6ecf9f3bbcc7d
582 committed changeset 2:25736cf2f5cbe41f6be4e6784ef6ecf9f3bbcc7d
584 $ cat a c
583 $ cat a c
585 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
584 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
586 do not process $Id:
585 do not process $Id:
587 xxx $
586 xxx $
588 expand $Id: c,v 25736cf2f5cb 1970/01/01 00:00:01 user $
587 expand $Id: c,v 25736cf2f5cb 1970/01/01 00:00:01 user $
589 do not process $Id:
588 do not process $Id:
590 xxx $
589 xxx $
591
590
592 Touch copied c and check its status
591 Touch copied c and check its status
593
592
594 $ touch c
593 $ touch c
595 $ hg status
594 $ hg status
596
595
597 Copy kwfile to keyword ignored file unexpanding keywords
596 Copy kwfile to keyword ignored file unexpanding keywords
598
597
599 $ hg --verbose copy a i
598 $ hg --verbose copy a i
600 copying a to i
599 copying a to i
601 overwriting i shrinking keywords
600 overwriting i shrinking keywords
602 $ head -n 1 i
601 $ head -n 1 i
603 expand $Id$
602 expand $Id$
604 $ hg forget i
603 $ hg forget i
605 $ rm i
604 $ rm i
606
605
607 Copy ignored file to ignored file: no overwriting
606 Copy ignored file to ignored file: no overwriting
608
607
609 $ hg --verbose copy b i
608 $ hg --verbose copy b i
610 copying b to i
609 copying b to i
611 $ hg forget i
610 $ hg forget i
612 $ rm i
611 $ rm i
613
612
614 cp symlink file; hg cp -A symlink file (part1)
613 cp symlink file; hg cp -A symlink file (part1)
615 - copied symlink points to kwfile: overwrite
614 - copied symlink points to kwfile: overwrite
616
615
617 #if symlink
616 #if symlink
618 $ cp sym i
617 $ cp sym i
619 $ ls -l i
618 $ ls -l i
620 -rw-r--r--* (glob)
619 -rw-r--r--* (glob)
621 $ head -1 i
620 $ head -1 i
622 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
621 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
623 $ hg copy --after --verbose sym i
622 $ hg copy --after --verbose sym i
624 copying sym to i
623 copying sym to i
625 overwriting i shrinking keywords
624 overwriting i shrinking keywords
626 $ head -1 i
625 $ head -1 i
627 expand $Id$
626 expand $Id$
628 $ hg forget i
627 $ hg forget i
629 $ rm i
628 $ rm i
630 #endif
629 #endif
631
630
632 Test different options of hg kwfiles
631 Test different options of hg kwfiles
633
632
634 $ hg kwfiles
633 $ hg kwfiles
635 a
634 a
636 c
635 c
637 $ hg -v kwfiles --ignore
636 $ hg -v kwfiles --ignore
638 I b
637 I b
639 I sym
638 I sym
640 $ hg kwfiles --all
639 $ hg kwfiles --all
641 K a
640 K a
642 K c
641 K c
643 I b
642 I b
644 I sym
643 I sym
645
644
646 Diff specific revision
645 Diff specific revision
647
646
648 $ hg diff --rev 1
647 $ hg diff --rev 1
649 diff -r ef63ca68695b c
648 diff -r ef63ca68695b c
650 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
649 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
651 +++ b/c * (glob)
650 +++ b/c * (glob)
652 @@ -0,0 +1,3 @@
651 @@ -0,0 +1,3 @@
653 +expand $Id$
652 +expand $Id$
654 +do not process $Id:
653 +do not process $Id:
655 +xxx $
654 +xxx $
656
655
657 Status after rollback:
656 Status after rollback:
658
657
659 $ hg rollback
658 $ hg rollback
660 repository tip rolled back to revision 1 (undo commit)
659 repository tip rolled back to revision 1 (undo commit)
661 working directory now based on revision 1
660 working directory now based on revision 1
662 $ hg status
661 $ hg status
663 A c
662 A c
664 $ hg update --clean
663 $ hg update --clean
665 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
664 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
666
665
667 #if symlink
666 #if symlink
668
667
669 cp symlink file; hg cp -A symlink file (part2)
668 cp symlink file; hg cp -A symlink file (part2)
670 - copied symlink points to kw ignored file: do not overwrite
669 - copied symlink points to kw ignored file: do not overwrite
671
670
672 $ cat a > i
671 $ cat a > i
673 $ ln -s i symignored
672 $ ln -s i symignored
674 $ hg commit -Am 'fake expansion in ignored and symlink' i symignored
673 $ hg commit -Am 'fake expansion in ignored and symlink' i symignored
675 $ cp symignored x
674 $ cp symignored x
676 $ hg copy --after --verbose symignored x
675 $ hg copy --after --verbose symignored x
677 copying symignored to x
676 copying symignored to x
678 $ head -n 1 x
677 $ head -n 1 x
679 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
678 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
680 $ hg forget x
679 $ hg forget x
681 $ rm x
680 $ rm x
682
681
683 $ hg rollback
682 $ hg rollback
684 repository tip rolled back to revision 1 (undo commit)
683 repository tip rolled back to revision 1 (undo commit)
685 working directory now based on revision 1
684 working directory now based on revision 1
686 $ hg update --clean
685 $ hg update --clean
687 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
686 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
688 $ rm i symignored
687 $ rm i symignored
689
688
690 #endif
689 #endif
691
690
692 Custom keywordmaps as argument to kwdemo
691 Custom keywordmaps as argument to kwdemo
693
692
694 $ hg --quiet kwdemo "Xinfo = {author}: {desc}"
693 $ hg --quiet kwdemo "Xinfo = {author}: {desc}"
695 [extensions]
694 [extensions]
696 keyword =
695 keyword =
697 [keyword]
696 [keyword]
698 ** =
697 ** =
699 b = ignore
698 b = ignore
700 demo.txt =
699 demo.txt =
701 i = ignore
700 i = ignore
702 [keywordset]
701 [keywordset]
703 svn = False
702 svn = False
704 [keywordmaps]
703 [keywordmaps]
705 Xinfo = {author}: {desc}
704 Xinfo = {author}: {desc}
706 $Xinfo: test: hg keyword configuration and expansion example $
705 $Xinfo: test: hg keyword configuration and expansion example $
707
706
708 Configure custom keywordmaps
707 Configure custom keywordmaps
709
708
710 $ cat <<EOF >>$HGRCPATH
709 $ cat <<EOF >>$HGRCPATH
711 > [keywordmaps]
710 > [keywordmaps]
712 > Id = {file} {node|short} {date|rfc822date} {author|user}
711 > Id = {file} {node|short} {date|rfc822date} {author|user}
713 > Xinfo = {author}: {desc}
712 > Xinfo = {author}: {desc}
714 > EOF
713 > EOF
715
714
716 Cat and hg cat files before custom expansion
715 Cat and hg cat files before custom expansion
717
716
718 $ cat a b
717 $ cat a b
719 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
718 expand $Id: a,v ef63ca68695b 1970/01/01 00:00:00 user $
720 do not process $Id:
719 do not process $Id:
721 xxx $
720 xxx $
722 ignore $Id$
721 ignore $Id$
723 $ hg cat sym a b && echo
722 $ hg cat sym a b && echo
724 expand $Id: a ef63ca68695b Thu, 01 Jan 1970 00:00:00 +0000 user $
723 expand $Id: a ef63ca68695b Thu, 01 Jan 1970 00:00:00 +0000 user $
725 do not process $Id:
724 do not process $Id:
726 xxx $
725 xxx $
727 ignore $Id$
726 ignore $Id$
728 a
727 a
729
728
730 Write custom keyword and prepare multi-line commit message
729 Write custom keyword and prepare multi-line commit message
731
730
732 $ echo '$Xinfo$' >> a
731 $ echo '$Xinfo$' >> a
733 $ cat <<EOF >> log
732 $ cat <<EOF >> log
734 > firstline
733 > firstline
735 > secondline
734 > secondline
736 > EOF
735 > EOF
737
736
738 Interrupted commit should not change state
737 Interrupted commit should not change state
739
738
740 $ hg commit
739 $ hg commit
741 abort: empty commit message
740 abort: empty commit message
742 [255]
741 [255]
743 $ hg status
742 $ hg status
744 M a
743 M a
745 ? c
744 ? c
746 ? log
745 ? log
747
746
748 Commit with multi-line message and custom expansion
747 Commit with multi-line message and custom expansion
749
748
750 $ hg --debug commit -l log -d '2 0' -u 'User Name <user@example.com>'
749 $ hg --debug commit -l log -d '2 0' -u 'User Name <user@example.com>'
751 a
750 a
752 removing unknown node 40a904bbbe4c from 1-phase boundary
753 overwriting a expanding keywords
751 overwriting a expanding keywords
754 committed changeset 2:bb948857c743469b22bbf51f7ec8112279ca5d83
752 committed changeset 2:bb948857c743469b22bbf51f7ec8112279ca5d83
755 $ rm log
753 $ rm log
756
754
757 Stat, verify and show custom expansion (firstline)
755 Stat, verify and show custom expansion (firstline)
758
756
759 $ hg status
757 $ hg status
760 ? c
758 ? c
761 $ hg verify
759 $ hg verify
762 checking changesets
760 checking changesets
763 checking manifests
761 checking manifests
764 crosschecking files in changesets and manifests
762 crosschecking files in changesets and manifests
765 checking files
763 checking files
766 3 files, 3 changesets, 4 total revisions
764 3 files, 3 changesets, 4 total revisions
767 $ cat a b
765 $ cat a b
768 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
766 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
769 do not process $Id:
767 do not process $Id:
770 xxx $
768 xxx $
771 $Xinfo: User Name <user@example.com>: firstline $
769 $Xinfo: User Name <user@example.com>: firstline $
772 ignore $Id$
770 ignore $Id$
773 $ hg cat sym a b && echo
771 $ hg cat sym a b && echo
774 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
772 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
775 do not process $Id:
773 do not process $Id:
776 xxx $
774 xxx $
777 $Xinfo: User Name <user@example.com>: firstline $
775 $Xinfo: User Name <user@example.com>: firstline $
778 ignore $Id$
776 ignore $Id$
779 a
777 a
780
778
781 annotate
779 annotate
782
780
783 $ hg annotate a
781 $ hg annotate a
784 1: expand $Id$
782 1: expand $Id$
785 1: do not process $Id:
783 1: do not process $Id:
786 1: xxx $
784 1: xxx $
787 2: $Xinfo$
785 2: $Xinfo$
788
786
789 remove with status checks
787 remove with status checks
790
788
791 $ hg debugrebuildstate
789 $ hg debugrebuildstate
792 $ hg remove a
790 $ hg remove a
793 $ hg --debug commit -m rma
791 $ hg --debug commit -m rma
794 committed changeset 3:d14c712653769de926994cf7fbb06c8fbd68f012
792 committed changeset 3:d14c712653769de926994cf7fbb06c8fbd68f012
795 $ hg status
793 $ hg status
796 ? c
794 ? c
797
795
798 Rollback, revert, and check expansion
796 Rollback, revert, and check expansion
799
797
800 $ hg rollback
798 $ hg rollback
801 repository tip rolled back to revision 2 (undo commit)
799 repository tip rolled back to revision 2 (undo commit)
802 working directory now based on revision 2
800 working directory now based on revision 2
803 $ hg status
801 $ hg status
804 R a
802 R a
805 ? c
803 ? c
806 $ hg revert --no-backup --rev tip a
804 $ hg revert --no-backup --rev tip a
807 $ cat a
805 $ cat a
808 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
806 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
809 do not process $Id:
807 do not process $Id:
810 xxx $
808 xxx $
811 $Xinfo: User Name <user@example.com>: firstline $
809 $Xinfo: User Name <user@example.com>: firstline $
812
810
813 Clone to test global and local configurations
811 Clone to test global and local configurations
814
812
815 $ cd ..
813 $ cd ..
816
814
817 Expansion in destination with global configuration
815 Expansion in destination with global configuration
818
816
819 $ hg --quiet clone Test globalconf
817 $ hg --quiet clone Test globalconf
820 $ cat globalconf/a
818 $ cat globalconf/a
821 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
819 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
822 do not process $Id:
820 do not process $Id:
823 xxx $
821 xxx $
824 $Xinfo: User Name <user@example.com>: firstline $
822 $Xinfo: User Name <user@example.com>: firstline $
825
823
826 No expansion in destination with local configuration in origin only
824 No expansion in destination with local configuration in origin only
827
825
828 $ hg --quiet --config 'keyword.**=ignore' clone Test localconf
826 $ hg --quiet --config 'keyword.**=ignore' clone Test localconf
829 $ cat localconf/a
827 $ cat localconf/a
830 expand $Id$
828 expand $Id$
831 do not process $Id:
829 do not process $Id:
832 xxx $
830 xxx $
833 $Xinfo$
831 $Xinfo$
834
832
835 Clone to test incoming
833 Clone to test incoming
836
834
837 $ hg clone -r1 Test Test-a
835 $ hg clone -r1 Test Test-a
838 adding changesets
836 adding changesets
839 adding manifests
837 adding manifests
840 adding file changes
838 adding file changes
841 added 2 changesets with 3 changes to 3 files
839 added 2 changesets with 3 changes to 3 files
842 updating to branch default
840 updating to branch default
843 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
841 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
844 $ cd Test-a
842 $ cd Test-a
845 $ cat <<EOF >> .hg/hgrc
843 $ cat <<EOF >> .hg/hgrc
846 > [paths]
844 > [paths]
847 > default = ../Test
845 > default = ../Test
848 > EOF
846 > EOF
849 $ hg incoming
847 $ hg incoming
850 comparing with $TESTTMP/Test (glob)
848 comparing with $TESTTMP/Test (glob)
851 searching for changes
849 searching for changes
852 changeset: 2:bb948857c743
850 changeset: 2:bb948857c743
853 tag: tip
851 tag: tip
854 user: User Name <user@example.com>
852 user: User Name <user@example.com>
855 date: Thu Jan 01 00:00:02 1970 +0000
853 date: Thu Jan 01 00:00:02 1970 +0000
856 summary: firstline
854 summary: firstline
857
855
858 Imported patch should not be rejected
856 Imported patch should not be rejected
859
857
860 >>> import re
858 >>> import re
861 >>> text = re.sub(r'(Id.*)', r'\1 rejecttest', open('a').read())
859 >>> text = re.sub(r'(Id.*)', r'\1 rejecttest', open('a').read())
862 >>> open('a', 'wb').write(text)
860 >>> open('a', 'wb').write(text)
863 $ hg --debug commit -m'rejects?' -d '3 0' -u 'User Name <user@example.com>'
861 $ hg --debug commit -m'rejects?' -d '3 0' -u 'User Name <user@example.com>'
864 a
862 a
865 overwriting a expanding keywords
863 overwriting a expanding keywords
866 committed changeset 2:85e279d709ffc28c9fdd1b868570985fc3d87082
864 committed changeset 2:85e279d709ffc28c9fdd1b868570985fc3d87082
867 $ hg export -o ../rejecttest.diff tip
865 $ hg export -o ../rejecttest.diff tip
868 $ cd ../Test
866 $ cd ../Test
869 $ hg import ../rejecttest.diff
867 $ hg import ../rejecttest.diff
870 applying ../rejecttest.diff
868 applying ../rejecttest.diff
871 $ cat a b
869 $ cat a b
872 expand $Id: a 4e0994474d25 Thu, 01 Jan 1970 00:00:03 +0000 user $ rejecttest
870 expand $Id: a 4e0994474d25 Thu, 01 Jan 1970 00:00:03 +0000 user $ rejecttest
873 do not process $Id: rejecttest
871 do not process $Id: rejecttest
874 xxx $
872 xxx $
875 $Xinfo: User Name <user@example.com>: rejects? $
873 $Xinfo: User Name <user@example.com>: rejects? $
876 ignore $Id$
874 ignore $Id$
877
875
878 $ hg rollback
876 $ hg rollback
879 repository tip rolled back to revision 2 (undo import)
877 repository tip rolled back to revision 2 (undo import)
880 working directory now based on revision 2
878 working directory now based on revision 2
881 $ hg update --clean
879 $ hg update --clean
882 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
880 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
883
881
884 kwexpand/kwshrink on selected files
882 kwexpand/kwshrink on selected files
885
883
886 $ mkdir x
884 $ mkdir x
887 $ hg copy a x/a
885 $ hg copy a x/a
888 $ hg --verbose kwshrink a
886 $ hg --verbose kwshrink a
889 overwriting a shrinking keywords
887 overwriting a shrinking keywords
890 - sleep required for dirstate.normal() check
888 - sleep required for dirstate.normal() check
891 $ sleep 1
889 $ sleep 1
892 $ hg status a
890 $ hg status a
893 $ hg --verbose kwexpand a
891 $ hg --verbose kwexpand a
894 overwriting a expanding keywords
892 overwriting a expanding keywords
895 $ hg status a
893 $ hg status a
896
894
897 kwexpand x/a should abort
895 kwexpand x/a should abort
898
896
899 $ hg --verbose kwexpand x/a
897 $ hg --verbose kwexpand x/a
900 abort: outstanding uncommitted changes
898 abort: outstanding uncommitted changes
901 [255]
899 [255]
902 $ cd x
900 $ cd x
903 $ hg --debug commit -m xa -d '3 0' -u 'User Name <user@example.com>'
901 $ hg --debug commit -m xa -d '3 0' -u 'User Name <user@example.com>'
904 x/a
902 x/a
905 x/a: copy a:779c764182ce5d43e2b1eb66ce06d7b47bfe342e
903 x/a: copy a:779c764182ce5d43e2b1eb66ce06d7b47bfe342e
906 overwriting x/a expanding keywords
904 overwriting x/a expanding keywords
907 committed changeset 3:b4560182a3f9a358179fd2d835c15e9da379c1e4
905 committed changeset 3:b4560182a3f9a358179fd2d835c15e9da379c1e4
908 $ cat a
906 $ cat a
909 expand $Id: x/a b4560182a3f9 Thu, 01 Jan 1970 00:00:03 +0000 user $
907 expand $Id: x/a b4560182a3f9 Thu, 01 Jan 1970 00:00:03 +0000 user $
910 do not process $Id:
908 do not process $Id:
911 xxx $
909 xxx $
912 $Xinfo: User Name <user@example.com>: xa $
910 $Xinfo: User Name <user@example.com>: xa $
913
911
914 kwshrink a inside directory x
912 kwshrink a inside directory x
915
913
916 $ hg --verbose kwshrink a
914 $ hg --verbose kwshrink a
917 overwriting x/a shrinking keywords
915 overwriting x/a shrinking keywords
918 $ cat a
916 $ cat a
919 expand $Id$
917 expand $Id$
920 do not process $Id:
918 do not process $Id:
921 xxx $
919 xxx $
922 $Xinfo$
920 $Xinfo$
923 $ cd ..
921 $ cd ..
924
922
925 kwexpand nonexistent
923 kwexpand nonexistent
926
924
927 $ hg kwexpand nonexistent
925 $ hg kwexpand nonexistent
928 nonexistent:* (glob)
926 nonexistent:* (glob)
929
927
930
928
931 #if serve
929 #if serve
932 hg serve
930 hg serve
933 - expand with hgweb file
931 - expand with hgweb file
934 - no expansion with hgweb annotate/changeset/filediff
932 - no expansion with hgweb annotate/changeset/filediff
935 - check errors
933 - check errors
936
934
937 $ hg serve -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
935 $ hg serve -p $HGPORT -d --pid-file=hg.pid -A access.log -E errors.log
938 $ cat hg.pid >> $DAEMON_PIDS
936 $ cat hg.pid >> $DAEMON_PIDS
939 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'file/tip/a/?style=raw'
937 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'file/tip/a/?style=raw'
940 200 Script output follows
938 200 Script output follows
941
939
942 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
940 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
943 do not process $Id:
941 do not process $Id:
944 xxx $
942 xxx $
945 $Xinfo: User Name <user@example.com>: firstline $
943 $Xinfo: User Name <user@example.com>: firstline $
946 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'annotate/tip/a/?style=raw'
944 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'annotate/tip/a/?style=raw'
947 200 Script output follows
945 200 Script output follows
948
946
949
947
950 user@1: expand $Id$
948 user@1: expand $Id$
951 user@1: do not process $Id:
949 user@1: do not process $Id:
952 user@1: xxx $
950 user@1: xxx $
953 user@2: $Xinfo$
951 user@2: $Xinfo$
954
952
955
953
956
954
957
955
958 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'rev/tip/?style=raw'
956 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'rev/tip/?style=raw'
959 200 Script output follows
957 200 Script output follows
960
958
961
959
962 # HG changeset patch
960 # HG changeset patch
963 # User User Name <user@example.com>
961 # User User Name <user@example.com>
964 # Date 3 0
962 # Date 3 0
965 # Node ID b4560182a3f9a358179fd2d835c15e9da379c1e4
963 # Node ID b4560182a3f9a358179fd2d835c15e9da379c1e4
966 # Parent bb948857c743469b22bbf51f7ec8112279ca5d83
964 # Parent bb948857c743469b22bbf51f7ec8112279ca5d83
967 xa
965 xa
968
966
969 diff -r bb948857c743 -r b4560182a3f9 x/a
967 diff -r bb948857c743 -r b4560182a3f9 x/a
970 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
968 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
971 +++ b/x/a Thu Jan 01 00:00:03 1970 +0000
969 +++ b/x/a Thu Jan 01 00:00:03 1970 +0000
972 @@ -0,0 +1,4 @@
970 @@ -0,0 +1,4 @@
973 +expand $Id$
971 +expand $Id$
974 +do not process $Id:
972 +do not process $Id:
975 +xxx $
973 +xxx $
976 +$Xinfo$
974 +$Xinfo$
977
975
978 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'diff/bb948857c743/a?style=raw'
976 $ "$TESTDIR/get-with-headers.py" localhost:$HGPORT 'diff/bb948857c743/a?style=raw'
979 200 Script output follows
977 200 Script output follows
980
978
981
979
982 diff -r ef63ca68695b -r bb948857c743 a
980 diff -r ef63ca68695b -r bb948857c743 a
983 --- a/a Thu Jan 01 00:00:00 1970 +0000
981 --- a/a Thu Jan 01 00:00:00 1970 +0000
984 +++ b/a Thu Jan 01 00:00:02 1970 +0000
982 +++ b/a Thu Jan 01 00:00:02 1970 +0000
985 @@ -1,3 +1,4 @@
983 @@ -1,3 +1,4 @@
986 expand $Id$
984 expand $Id$
987 do not process $Id:
985 do not process $Id:
988 xxx $
986 xxx $
989 +$Xinfo$
987 +$Xinfo$
990
988
991
989
992
990
993
991
994 $ cat errors.log
992 $ cat errors.log
995 #endif
993 #endif
996
994
997 Prepare merge and resolve tests
995 Prepare merge and resolve tests
998
996
999 $ echo '$Id$' > m
997 $ echo '$Id$' > m
1000 $ hg add m
998 $ hg add m
1001 $ hg commit -m 4kw
999 $ hg commit -m 4kw
1002 $ echo foo >> m
1000 $ echo foo >> m
1003 $ hg commit -m 5foo
1001 $ hg commit -m 5foo
1004
1002
1005 simplemerge
1003 simplemerge
1006
1004
1007 $ hg update 4
1005 $ hg update 4
1008 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1006 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1009 $ echo foo >> m
1007 $ echo foo >> m
1010 $ hg commit -m 6foo
1008 $ hg commit -m 6foo
1011 created new head
1009 created new head
1012 $ hg merge
1010 $ hg merge
1013 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1011 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
1014 (branch merge, don't forget to commit)
1012 (branch merge, don't forget to commit)
1015 $ hg commit -m simplemerge
1013 $ hg commit -m simplemerge
1016 $ cat m
1014 $ cat m
1017 $Id: m 27d48ee14f67 Thu, 01 Jan 1970 00:00:00 +0000 test $
1015 $Id: m 27d48ee14f67 Thu, 01 Jan 1970 00:00:00 +0000 test $
1018 foo
1016 foo
1019
1017
1020 conflict: keyword should stay outside conflict zone
1018 conflict: keyword should stay outside conflict zone
1021
1019
1022 $ hg update 4
1020 $ hg update 4
1023 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1021 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1024 $ echo bar >> m
1022 $ echo bar >> m
1025 $ hg commit -m 8bar
1023 $ hg commit -m 8bar
1026 created new head
1024 created new head
1027 $ hg merge
1025 $ hg merge
1028 merging m
1026 merging m
1029 warning: conflicts during merge.
1027 warning: conflicts during merge.
1030 merging m incomplete! (edit conflicts, then use 'hg resolve --mark')
1028 merging m incomplete! (edit conflicts, then use 'hg resolve --mark')
1031 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
1029 0 files updated, 0 files merged, 0 files removed, 1 files unresolved
1032 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
1030 use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
1033 [1]
1031 [1]
1034 $ cat m
1032 $ cat m
1035 $Id$
1033 $Id$
1036 <<<<<<< local
1034 <<<<<<< local
1037 bar
1035 bar
1038 =======
1036 =======
1039 foo
1037 foo
1040 >>>>>>> other
1038 >>>>>>> other
1041
1039
1042 resolve to local
1040 resolve to local
1043
1041
1044 $ HGMERGE=internal:local hg resolve -a
1042 $ HGMERGE=internal:local hg resolve -a
1045 $ hg commit -m localresolve
1043 $ hg commit -m localresolve
1046 $ cat m
1044 $ cat m
1047 $Id: m 800511b3a22d Thu, 01 Jan 1970 00:00:00 +0000 test $
1045 $Id: m 800511b3a22d Thu, 01 Jan 1970 00:00:00 +0000 test $
1048 bar
1046 bar
1049
1047
1050 Test restricted mode with transplant -b
1048 Test restricted mode with transplant -b
1051
1049
1052 $ hg update 6
1050 $ hg update 6
1053 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1051 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1054 $ hg branch foo
1052 $ hg branch foo
1055 marked working directory as branch foo
1053 marked working directory as branch foo
1056 (branches are permanent and global, did you want a bookmark?)
1054 (branches are permanent and global, did you want a bookmark?)
1057 $ mv a a.bak
1055 $ mv a a.bak
1058 $ echo foobranch > a
1056 $ echo foobranch > a
1059 $ cat a.bak >> a
1057 $ cat a.bak >> a
1060 $ rm a.bak
1058 $ rm a.bak
1061 $ hg commit -m 9foobranch
1059 $ hg commit -m 9foobranch
1062 $ hg update default
1060 $ hg update default
1063 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1061 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1064 $ hg -y transplant -b foo tip
1062 $ hg -y transplant -b foo tip
1065 applying 4aa30d025d50
1063 applying 4aa30d025d50
1066 4aa30d025d50 transplanted to e00abbf63521
1064 4aa30d025d50 transplanted to e00abbf63521
1067
1065
1068 Expansion in changeset but not in file
1066 Expansion in changeset but not in file
1069
1067
1070 $ hg tip -p
1068 $ hg tip -p
1071 changeset: 11:e00abbf63521
1069 changeset: 11:e00abbf63521
1072 tag: tip
1070 tag: tip
1073 parent: 9:800511b3a22d
1071 parent: 9:800511b3a22d
1074 user: test
1072 user: test
1075 date: Thu Jan 01 00:00:00 1970 +0000
1073 date: Thu Jan 01 00:00:00 1970 +0000
1076 summary: 9foobranch
1074 summary: 9foobranch
1077
1075
1078 diff -r 800511b3a22d -r e00abbf63521 a
1076 diff -r 800511b3a22d -r e00abbf63521 a
1079 --- a/a Thu Jan 01 00:00:00 1970 +0000
1077 --- a/a Thu Jan 01 00:00:00 1970 +0000
1080 +++ b/a Thu Jan 01 00:00:00 1970 +0000
1078 +++ b/a Thu Jan 01 00:00:00 1970 +0000
1081 @@ -1,3 +1,4 @@
1079 @@ -1,3 +1,4 @@
1082 +foobranch
1080 +foobranch
1083 expand $Id$
1081 expand $Id$
1084 do not process $Id:
1082 do not process $Id:
1085 xxx $
1083 xxx $
1086
1084
1087 $ head -n 2 a
1085 $ head -n 2 a
1088 foobranch
1086 foobranch
1089 expand $Id: a e00abbf63521 Thu, 01 Jan 1970 00:00:00 +0000 test $
1087 expand $Id: a e00abbf63521 Thu, 01 Jan 1970 00:00:00 +0000 test $
1090
1088
1091 Turn off expansion
1089 Turn off expansion
1092
1090
1093 $ hg -q rollback
1091 $ hg -q rollback
1094 $ hg -q update -C
1092 $ hg -q update -C
1095
1093
1096 kwshrink with unknown file u
1094 kwshrink with unknown file u
1097
1095
1098 $ cp a u
1096 $ cp a u
1099 $ hg --verbose kwshrink
1097 $ hg --verbose kwshrink
1100 overwriting a shrinking keywords
1098 overwriting a shrinking keywords
1101 overwriting m shrinking keywords
1099 overwriting m shrinking keywords
1102 overwriting x/a shrinking keywords
1100 overwriting x/a shrinking keywords
1103
1101
1104 Keywords shrunk in working directory, but not yet disabled
1102 Keywords shrunk in working directory, but not yet disabled
1105 - cat shows unexpanded keywords
1103 - cat shows unexpanded keywords
1106 - hg cat shows expanded keywords
1104 - hg cat shows expanded keywords
1107
1105
1108 $ cat a b
1106 $ cat a b
1109 expand $Id$
1107 expand $Id$
1110 do not process $Id:
1108 do not process $Id:
1111 xxx $
1109 xxx $
1112 $Xinfo$
1110 $Xinfo$
1113 ignore $Id$
1111 ignore $Id$
1114 $ hg cat sym a b && echo
1112 $ hg cat sym a b && echo
1115 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
1113 expand $Id: a bb948857c743 Thu, 01 Jan 1970 00:00:02 +0000 user $
1116 do not process $Id:
1114 do not process $Id:
1117 xxx $
1115 xxx $
1118 $Xinfo: User Name <user@example.com>: firstline $
1116 $Xinfo: User Name <user@example.com>: firstline $
1119 ignore $Id$
1117 ignore $Id$
1120 a
1118 a
1121
1119
1122 Now disable keyword expansion
1120 Now disable keyword expansion
1123
1121
1124 $ rm "$HGRCPATH"
1122 $ rm "$HGRCPATH"
1125 $ cat a b
1123 $ cat a b
1126 expand $Id$
1124 expand $Id$
1127 do not process $Id:
1125 do not process $Id:
1128 xxx $
1126 xxx $
1129 $Xinfo$
1127 $Xinfo$
1130 ignore $Id$
1128 ignore $Id$
1131 $ hg cat sym a b && echo
1129 $ hg cat sym a b && echo
1132 expand $Id$
1130 expand $Id$
1133 do not process $Id:
1131 do not process $Id:
1134 xxx $
1132 xxx $
1135 $Xinfo$
1133 $Xinfo$
1136 ignore $Id$
1134 ignore $Id$
1137 a
1135 a
1138
1136
1139 $ cd ..
1137 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now