color: initialize color for the localrepo ui...
Pierre-Yves David
r31111:95ec3ad6 default
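Note on the change: localrepository.__init__ creates a private ui copy (self.ui = baseui.copy()) and reads the repository's own .hg/hgrc into it, so color handling has to be initialized on that copy as well; this changeset adds a color.setup(self.ui) call right after extension loading and feature setup. A minimal sketch of where the new call takes effect (the driver below is illustrative only; ui.ui() and hg.repository are the contemporary public constructors, not part of this change):

    # Hypothetical driver, assuming a Mercurial 4.x tree on the path.
    from mercurial import hg, ui as uimod

    baseui = uimod.ui()                # global ui (e.g. ~/.hgrc settings)
    repo = hg.repository(baseui, '.')  # runs localrepository.__init__
    # repo.ui is a copy of baseui that has also read .hg/hgrc; with this
    # change, __init__ has already called color.setup(repo.ui), so the
    # repo ui's color mode now honors per-repository configuration.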
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2047 +1,2049 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 color,
31 context,
32 context,
32 dirstate,
33 dirstate,
33 dirstateguard,
34 dirstateguard,
34 encoding,
35 encoding,
35 error,
36 error,
36 exchange,
37 exchange,
37 extensions,
38 extensions,
38 filelog,
39 filelog,
39 hook,
40 hook,
40 lock as lockmod,
41 lock as lockmod,
41 manifest,
42 manifest,
42 match as matchmod,
43 match as matchmod,
43 merge as mergemod,
44 merge as mergemod,
44 mergeutil,
45 mergeutil,
45 namespaces,
46 namespaces,
46 obsolete,
47 obsolete,
47 pathutil,
48 pathutil,
48 peer,
49 peer,
49 phases,
50 phases,
50 pushkey,
51 pushkey,
51 repoview,
52 repoview,
52 revset,
53 revset,
53 revsetlang,
54 revsetlang,
54 scmutil,
55 scmutil,
55 store,
56 store,
56 subrepo,
57 subrepo,
57 tags as tagsmod,
58 tags as tagsmod,
58 transaction,
59 transaction,
59 txnutil,
60 txnutil,
60 util,
61 util,
61 )
62 )
62
63
63 release = lockmod.release
64 release = lockmod.release
64 urlerr = util.urlerr
65 urlerr = util.urlerr
65 urlreq = util.urlreq
66 urlreq = util.urlreq
66
67
67 class repofilecache(scmutil.filecache):
68 class repofilecache(scmutil.filecache):
68 """All filecache usage on repo are done for logic that should be unfiltered
69 """All filecache usage on repo are done for logic that should be unfiltered
69 """
70 """
70
71
71 def __get__(self, repo, type=None):
72 def __get__(self, repo, type=None):
72 if repo is None:
73 if repo is None:
73 return self
74 return self
74 return super(repofilecache, self).__get__(repo.unfiltered(), type)
75 return super(repofilecache, self).__get__(repo.unfiltered(), type)
75 def __set__(self, repo, value):
76 def __set__(self, repo, value):
76 return super(repofilecache, self).__set__(repo.unfiltered(), value)
77 return super(repofilecache, self).__set__(repo.unfiltered(), value)
77 def __delete__(self, repo):
78 def __delete__(self, repo):
78 return super(repofilecache, self).__delete__(repo.unfiltered())
79 return super(repofilecache, self).__delete__(repo.unfiltered())
79
80
80 class storecache(repofilecache):
81 class storecache(repofilecache):
81 """filecache for files in the store"""
82 """filecache for files in the store"""
82 def join(self, obj, fname):
83 def join(self, obj, fname):
83 return obj.sjoin(fname)
84 return obj.sjoin(fname)
84
85
85 class unfilteredpropertycache(util.propertycache):
86 class unfilteredpropertycache(util.propertycache):
86 """propertycache that apply to unfiltered repo only"""
87 """propertycache that apply to unfiltered repo only"""
87
88
88 def __get__(self, repo, type=None):
89 def __get__(self, repo, type=None):
89 unfi = repo.unfiltered()
90 unfi = repo.unfiltered()
90 if unfi is repo:
91 if unfi is repo:
91 return super(unfilteredpropertycache, self).__get__(unfi)
92 return super(unfilteredpropertycache, self).__get__(unfi)
92 return getattr(unfi, self.name)
93 return getattr(unfi, self.name)
93
94
94 class filteredpropertycache(util.propertycache):
95 class filteredpropertycache(util.propertycache):
95 """propertycache that must take filtering in account"""
96 """propertycache that must take filtering in account"""
96
97
97 def cachevalue(self, obj, value):
98 def cachevalue(self, obj, value):
98 object.__setattr__(obj, self.name, value)
99 object.__setattr__(obj, self.name, value)
99
100
100
101
101 def hasunfilteredcache(repo, name):
102 def hasunfilteredcache(repo, name):
102 """check if a repo has an unfilteredpropertycache value for <name>"""
103 """check if a repo has an unfilteredpropertycache value for <name>"""
103 return name in vars(repo.unfiltered())
104 return name in vars(repo.unfiltered())
104
105
105 def unfilteredmethod(orig):
106 def unfilteredmethod(orig):
106 """decorate method that always need to be run on unfiltered version"""
107 """decorate method that always need to be run on unfiltered version"""
107 def wrapper(repo, *args, **kwargs):
108 def wrapper(repo, *args, **kwargs):
108 return orig(repo.unfiltered(), *args, **kwargs)
109 return orig(repo.unfiltered(), *args, **kwargs)
109 return wrapper
110 return wrapper
110
111
111 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
112 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
112 'unbundle'))
113 'unbundle'))
113 legacycaps = moderncaps.union(set(['changegroupsubset']))
114 legacycaps = moderncaps.union(set(['changegroupsubset']))
114
115
115 class localpeer(peer.peerrepository):
116 class localpeer(peer.peerrepository):
116 '''peer for a local repo; reflects only the most recent API'''
117 '''peer for a local repo; reflects only the most recent API'''
117
118
118 def __init__(self, repo, caps=moderncaps):
119 def __init__(self, repo, caps=moderncaps):
119 peer.peerrepository.__init__(self)
120 peer.peerrepository.__init__(self)
120 self._repo = repo.filtered('served')
121 self._repo = repo.filtered('served')
121 self.ui = repo.ui
122 self.ui = repo.ui
122 self._caps = repo._restrictcapabilities(caps)
123 self._caps = repo._restrictcapabilities(caps)
123 self.requirements = repo.requirements
124 self.requirements = repo.requirements
124 self.supportedformats = repo.supportedformats
125 self.supportedformats = repo.supportedformats
125
126
126 def close(self):
127 def close(self):
127 self._repo.close()
128 self._repo.close()
128
129
129 def _capabilities(self):
130 def _capabilities(self):
130 return self._caps
131 return self._caps
131
132
132 def local(self):
133 def local(self):
133 return self._repo
134 return self._repo
134
135
135 def canpush(self):
136 def canpush(self):
136 return True
137 return True
137
138
138 def url(self):
139 def url(self):
139 return self._repo.url()
140 return self._repo.url()
140
141
141 def lookup(self, key):
142 def lookup(self, key):
142 return self._repo.lookup(key)
143 return self._repo.lookup(key)
143
144
144 def branchmap(self):
145 def branchmap(self):
145 return self._repo.branchmap()
146 return self._repo.branchmap()
146
147
147 def heads(self):
148 def heads(self):
148 return self._repo.heads()
149 return self._repo.heads()
149
150
150 def known(self, nodes):
151 def known(self, nodes):
151 return self._repo.known(nodes)
152 return self._repo.known(nodes)
152
153
153 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
154 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
154 **kwargs):
155 **kwargs):
155 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
156 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
156 common=common, bundlecaps=bundlecaps,
157 common=common, bundlecaps=bundlecaps,
157 **kwargs)
158 **kwargs)
158 cb = util.chunkbuffer(chunks)
159 cb = util.chunkbuffer(chunks)
159
160
160 if bundlecaps is not None and 'HG20' in bundlecaps:
161 if bundlecaps is not None and 'HG20' in bundlecaps:
161 # When requesting a bundle2, getbundle returns a stream to make the
162 # When requesting a bundle2, getbundle returns a stream to make the
162 # wire level function happier. We need to build a proper object
163 # wire level function happier. We need to build a proper object
163 # from it in local peer.
164 # from it in local peer.
164 return bundle2.getunbundler(self.ui, cb)
165 return bundle2.getunbundler(self.ui, cb)
165 else:
166 else:
166 return changegroup.getunbundler('01', cb, None)
167 return changegroup.getunbundler('01', cb, None)
167
168
168 # TODO We might want to move the next two calls into legacypeer and add
169 # TODO We might want to move the next two calls into legacypeer and add
169 # unbundle instead.
170 # unbundle instead.
170
171
171 def unbundle(self, cg, heads, url):
172 def unbundle(self, cg, heads, url):
172 """apply a bundle on a repo
173 """apply a bundle on a repo
173
174
174 This function handles the repo locking itself."""
175 This function handles the repo locking itself."""
175 try:
176 try:
176 try:
177 try:
177 cg = exchange.readbundle(self.ui, cg, None)
178 cg = exchange.readbundle(self.ui, cg, None)
178 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
179 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
179 if util.safehasattr(ret, 'getchunks'):
180 if util.safehasattr(ret, 'getchunks'):
180 # This is a bundle20 object, turn it into an unbundler.
181 # This is a bundle20 object, turn it into an unbundler.
181 # This little dance should be dropped eventually when the
182 # This little dance should be dropped eventually when the
182 # API is finally improved.
183 # API is finally improved.
183 stream = util.chunkbuffer(ret.getchunks())
184 stream = util.chunkbuffer(ret.getchunks())
184 ret = bundle2.getunbundler(self.ui, stream)
185 ret = bundle2.getunbundler(self.ui, stream)
185 return ret
186 return ret
186 except Exception as exc:
187 except Exception as exc:
187 # If the exception contains output salvaged from a bundle2
188 # If the exception contains output salvaged from a bundle2
188 # reply, we need to make sure it is printed before continuing
189 # reply, we need to make sure it is printed before continuing
189 # to fail. So we build a bundle2 with such output and consume
190 # to fail. So we build a bundle2 with such output and consume
190 # it directly.
191 # it directly.
191 #
192 #
192 # This is not very elegant but allows a "simple" solution for
193 # This is not very elegant but allows a "simple" solution for
193 # issue4594
194 # issue4594
194 output = getattr(exc, '_bundle2salvagedoutput', ())
195 output = getattr(exc, '_bundle2salvagedoutput', ())
195 if output:
196 if output:
196 bundler = bundle2.bundle20(self._repo.ui)
197 bundler = bundle2.bundle20(self._repo.ui)
197 for out in output:
198 for out in output:
198 bundler.addpart(out)
199 bundler.addpart(out)
199 stream = util.chunkbuffer(bundler.getchunks())
200 stream = util.chunkbuffer(bundler.getchunks())
200 b = bundle2.getunbundler(self.ui, stream)
201 b = bundle2.getunbundler(self.ui, stream)
201 bundle2.processbundle(self._repo, b)
202 bundle2.processbundle(self._repo, b)
202 raise
203 raise
203 except error.PushRaced as exc:
204 except error.PushRaced as exc:
204 raise error.ResponseError(_('push failed:'), str(exc))
205 raise error.ResponseError(_('push failed:'), str(exc))
205
206
206 def lock(self):
207 def lock(self):
207 return self._repo.lock()
208 return self._repo.lock()
208
209
209 def addchangegroup(self, cg, source, url):
210 def addchangegroup(self, cg, source, url):
210 return cg.apply(self._repo, source, url)
211 return cg.apply(self._repo, source, url)
211
212
212 def pushkey(self, namespace, key, old, new):
213 def pushkey(self, namespace, key, old, new):
213 return self._repo.pushkey(namespace, key, old, new)
214 return self._repo.pushkey(namespace, key, old, new)
214
215
215 def listkeys(self, namespace):
216 def listkeys(self, namespace):
216 return self._repo.listkeys(namespace)
217 return self._repo.listkeys(namespace)
217
218
218 def debugwireargs(self, one, two, three=None, four=None, five=None):
219 def debugwireargs(self, one, two, three=None, four=None, five=None):
219 '''used to test argument passing over the wire'''
220 '''used to test argument passing over the wire'''
220 return "%s %s %s %s %s" % (one, two, three, four, five)
221 return "%s %s %s %s %s" % (one, two, three, four, five)
221
222
222 class locallegacypeer(localpeer):
223 class locallegacypeer(localpeer):
223 '''peer extension which implements legacy methods too; used for tests with
224 '''peer extension which implements legacy methods too; used for tests with
224 restricted capabilities'''
225 restricted capabilities'''
225
226
226 def __init__(self, repo):
227 def __init__(self, repo):
227 localpeer.__init__(self, repo, caps=legacycaps)
228 localpeer.__init__(self, repo, caps=legacycaps)
228
229
229 def branches(self, nodes):
230 def branches(self, nodes):
230 return self._repo.branches(nodes)
231 return self._repo.branches(nodes)
231
232
232 def between(self, pairs):
233 def between(self, pairs):
233 return self._repo.between(pairs)
234 return self._repo.between(pairs)
234
235
235 def changegroup(self, basenodes, source):
236 def changegroup(self, basenodes, source):
236 return changegroup.changegroup(self._repo, basenodes, source)
237 return changegroup.changegroup(self._repo, basenodes, source)
237
238
238 def changegroupsubset(self, bases, heads, source):
239 def changegroupsubset(self, bases, heads, source):
239 return changegroup.changegroupsubset(self._repo, bases, heads, source)
240 return changegroup.changegroupsubset(self._repo, bases, heads, source)
240
241
241 class localrepository(object):
242 class localrepository(object):
242
243
243 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
244 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
244 'manifestv2'))
245 'manifestv2'))
245 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
246 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
246 'dotencode'))
247 'dotencode'))
247 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
248 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
248 filtername = None
249 filtername = None
249
250
250 # a list of (ui, featureset) functions.
251 # a list of (ui, featureset) functions.
251 # only functions defined in module of enabled extensions are invoked
252 # only functions defined in module of enabled extensions are invoked
252 featuresetupfuncs = set()
253 featuresetupfuncs = set()
253
254
254 def __init__(self, baseui, path, create=False):
255 def __init__(self, baseui, path, create=False):
255 self.requirements = set()
256 self.requirements = set()
256 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
257 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
257 self.wopener = self.wvfs
258 self.wopener = self.wvfs
258 self.root = self.wvfs.base
259 self.root = self.wvfs.base
259 self.path = self.wvfs.join(".hg")
260 self.path = self.wvfs.join(".hg")
260 self.origroot = path
261 self.origroot = path
261 self.auditor = pathutil.pathauditor(self.root, self._checknested)
262 self.auditor = pathutil.pathauditor(self.root, self._checknested)
262 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
263 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
263 realfs=False)
264 realfs=False)
264 self.vfs = scmutil.vfs(self.path)
265 self.vfs = scmutil.vfs(self.path)
265 self.opener = self.vfs
266 self.opener = self.vfs
266 self.baseui = baseui
267 self.baseui = baseui
267 self.ui = baseui.copy()
268 self.ui = baseui.copy()
268 self.ui.copy = baseui.copy # prevent copying repo configuration
269 self.ui.copy = baseui.copy # prevent copying repo configuration
269 # A list of callback to shape the phase if no data were found.
270 # A list of callback to shape the phase if no data were found.
270 # Callback are in the form: func(repo, roots) --> processed root.
271 # Callback are in the form: func(repo, roots) --> processed root.
271 # This list it to be filled by extension during repo setup
272 # This list it to be filled by extension during repo setup
272 self._phasedefaults = []
273 self._phasedefaults = []
273 try:
274 try:
274 self.ui.readconfig(self.join("hgrc"), self.root)
275 self.ui.readconfig(self.join("hgrc"), self.root)
275 self._loadextensions()
276 self._loadextensions()
276 except IOError:
277 except IOError:
277 pass
278 pass
278
279
279 if self.featuresetupfuncs:
280 if self.featuresetupfuncs:
280 self.supported = set(self._basesupported) # use private copy
281 self.supported = set(self._basesupported) # use private copy
281 extmods = set(m.__name__ for n, m
282 extmods = set(m.__name__ for n, m
282 in extensions.extensions(self.ui))
283 in extensions.extensions(self.ui))
283 for setupfunc in self.featuresetupfuncs:
284 for setupfunc in self.featuresetupfuncs:
284 if setupfunc.__module__ in extmods:
285 if setupfunc.__module__ in extmods:
285 setupfunc(self.ui, self.supported)
286 setupfunc(self.ui, self.supported)
286 else:
287 else:
287 self.supported = self._basesupported
288 self.supported = self._basesupported
289 color.setup(self.ui)
288
290
289 # Add compression engines.
291 # Add compression engines.
290 for name in util.compengines:
292 for name in util.compengines:
291 engine = util.compengines[name]
293 engine = util.compengines[name]
292 if engine.revlogheader():
294 if engine.revlogheader():
293 self.supported.add('exp-compression-%s' % name)
295 self.supported.add('exp-compression-%s' % name)
294
296
295 if not self.vfs.isdir():
297 if not self.vfs.isdir():
296 if create:
298 if create:
297 self.requirements = newreporequirements(self)
299 self.requirements = newreporequirements(self)
298
300
299 if not self.wvfs.exists():
301 if not self.wvfs.exists():
300 self.wvfs.makedirs()
302 self.wvfs.makedirs()
301 self.vfs.makedir(notindexed=True)
303 self.vfs.makedir(notindexed=True)
302
304
303 if 'store' in self.requirements:
305 if 'store' in self.requirements:
304 self.vfs.mkdir("store")
306 self.vfs.mkdir("store")
305
307
306 # create an invalid changelog
308 # create an invalid changelog
307 self.vfs.append(
309 self.vfs.append(
308 "00changelog.i",
310 "00changelog.i",
309 '\0\0\0\2' # represents revlogv2
311 '\0\0\0\2' # represents revlogv2
310 ' dummy changelog to prevent using the old repo layout'
312 ' dummy changelog to prevent using the old repo layout'
311 )
313 )
312 else:
314 else:
313 raise error.RepoError(_("repository %s not found") % path)
315 raise error.RepoError(_("repository %s not found") % path)
314 elif create:
316 elif create:
315 raise error.RepoError(_("repository %s already exists") % path)
317 raise error.RepoError(_("repository %s already exists") % path)
316 else:
318 else:
317 try:
319 try:
318 self.requirements = scmutil.readrequires(
320 self.requirements = scmutil.readrequires(
319 self.vfs, self.supported)
321 self.vfs, self.supported)
320 except IOError as inst:
322 except IOError as inst:
321 if inst.errno != errno.ENOENT:
323 if inst.errno != errno.ENOENT:
322 raise
324 raise
323
325
324 self.sharedpath = self.path
326 self.sharedpath = self.path
325 try:
327 try:
326 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
328 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
327 realpath=True)
329 realpath=True)
328 s = vfs.base
330 s = vfs.base
329 if not vfs.exists():
331 if not vfs.exists():
330 raise error.RepoError(
332 raise error.RepoError(
331 _('.hg/sharedpath points to nonexistent directory %s') % s)
333 _('.hg/sharedpath points to nonexistent directory %s') % s)
332 self.sharedpath = s
334 self.sharedpath = s
333 except IOError as inst:
335 except IOError as inst:
334 if inst.errno != errno.ENOENT:
336 if inst.errno != errno.ENOENT:
335 raise
337 raise
336
338
337 self.store = store.store(
339 self.store = store.store(
338 self.requirements, self.sharedpath, scmutil.vfs)
340 self.requirements, self.sharedpath, scmutil.vfs)
339 self.spath = self.store.path
341 self.spath = self.store.path
340 self.svfs = self.store.vfs
342 self.svfs = self.store.vfs
341 self.sjoin = self.store.join
343 self.sjoin = self.store.join
342 self.vfs.createmode = self.store.createmode
344 self.vfs.createmode = self.store.createmode
343 self._applyopenerreqs()
345 self._applyopenerreqs()
344 if create:
346 if create:
345 self._writerequirements()
347 self._writerequirements()
346
348
347 self._dirstatevalidatewarned = False
349 self._dirstatevalidatewarned = False
348
350
349 self._branchcaches = {}
351 self._branchcaches = {}
350 self._revbranchcache = None
352 self._revbranchcache = None
351 self.filterpats = {}
353 self.filterpats = {}
352 self._datafilters = {}
354 self._datafilters = {}
353 self._transref = self._lockref = self._wlockref = None
355 self._transref = self._lockref = self._wlockref = None
354
356
355 # A cache for various files under .hg/ that tracks file changes,
357 # A cache for various files under .hg/ that tracks file changes,
356 # (used by the filecache decorator)
358 # (used by the filecache decorator)
357 #
359 #
358 # Maps a property name to its util.filecacheentry
360 # Maps a property name to its util.filecacheentry
359 self._filecache = {}
361 self._filecache = {}
360
362
361 # hold sets of revision to be filtered
363 # hold sets of revision to be filtered
362 # should be cleared when something might have changed the filter value:
364 # should be cleared when something might have changed the filter value:
363 # - new changesets,
365 # - new changesets,
364 # - phase change,
366 # - phase change,
365 # - new obsolescence marker,
367 # - new obsolescence marker,
366 # - working directory parent change,
368 # - working directory parent change,
367 # - bookmark changes
369 # - bookmark changes
368 self.filteredrevcache = {}
370 self.filteredrevcache = {}
369
371
370 # generic mapping between names and nodes
372 # generic mapping between names and nodes
371 self.names = namespaces.namespaces()
373 self.names = namespaces.namespaces()
372
374
373 def close(self):
375 def close(self):
374 self._writecaches()
376 self._writecaches()
375
377
376 def _loadextensions(self):
378 def _loadextensions(self):
377 extensions.loadall(self.ui)
379 extensions.loadall(self.ui)
378
380
379 def _writecaches(self):
381 def _writecaches(self):
380 if self._revbranchcache:
382 if self._revbranchcache:
381 self._revbranchcache.write()
383 self._revbranchcache.write()
382
384
383 def _restrictcapabilities(self, caps):
385 def _restrictcapabilities(self, caps):
384 if self.ui.configbool('experimental', 'bundle2-advertise', True):
386 if self.ui.configbool('experimental', 'bundle2-advertise', True):
385 caps = set(caps)
387 caps = set(caps)
386 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
388 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
387 caps.add('bundle2=' + urlreq.quote(capsblob))
389 caps.add('bundle2=' + urlreq.quote(capsblob))
388 return caps
390 return caps
389
391
390 def _applyopenerreqs(self):
392 def _applyopenerreqs(self):
391 self.svfs.options = dict((r, 1) for r in self.requirements
393 self.svfs.options = dict((r, 1) for r in self.requirements
392 if r in self.openerreqs)
394 if r in self.openerreqs)
393 # experimental config: format.chunkcachesize
395 # experimental config: format.chunkcachesize
394 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
396 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
395 if chunkcachesize is not None:
397 if chunkcachesize is not None:
396 self.svfs.options['chunkcachesize'] = chunkcachesize
398 self.svfs.options['chunkcachesize'] = chunkcachesize
397 # experimental config: format.maxchainlen
399 # experimental config: format.maxchainlen
398 maxchainlen = self.ui.configint('format', 'maxchainlen')
400 maxchainlen = self.ui.configint('format', 'maxchainlen')
399 if maxchainlen is not None:
401 if maxchainlen is not None:
400 self.svfs.options['maxchainlen'] = maxchainlen
402 self.svfs.options['maxchainlen'] = maxchainlen
401 # experimental config: format.manifestcachesize
403 # experimental config: format.manifestcachesize
402 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
404 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
403 if manifestcachesize is not None:
405 if manifestcachesize is not None:
404 self.svfs.options['manifestcachesize'] = manifestcachesize
406 self.svfs.options['manifestcachesize'] = manifestcachesize
405 # experimental config: format.aggressivemergedeltas
407 # experimental config: format.aggressivemergedeltas
406 aggressivemergedeltas = self.ui.configbool('format',
408 aggressivemergedeltas = self.ui.configbool('format',
407 'aggressivemergedeltas', False)
409 'aggressivemergedeltas', False)
408 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
410 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
409 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
411 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
410
412
411 for r in self.requirements:
413 for r in self.requirements:
412 if r.startswith('exp-compression-'):
414 if r.startswith('exp-compression-'):
413 self.svfs.options['compengine'] = r[len('exp-compression-'):]
415 self.svfs.options['compengine'] = r[len('exp-compression-'):]
414
416
415 def _writerequirements(self):
417 def _writerequirements(self):
416 scmutil.writerequires(self.vfs, self.requirements)
418 scmutil.writerequires(self.vfs, self.requirements)
417
419
418 def _checknested(self, path):
420 def _checknested(self, path):
419 """Determine if path is a legal nested repository."""
421 """Determine if path is a legal nested repository."""
420 if not path.startswith(self.root):
422 if not path.startswith(self.root):
421 return False
423 return False
422 subpath = path[len(self.root) + 1:]
424 subpath = path[len(self.root) + 1:]
423 normsubpath = util.pconvert(subpath)
425 normsubpath = util.pconvert(subpath)
424
426
425 # XXX: Checking against the current working copy is wrong in
427 # XXX: Checking against the current working copy is wrong in
426 # the sense that it can reject things like
428 # the sense that it can reject things like
427 #
429 #
428 # $ hg cat -r 10 sub/x.txt
430 # $ hg cat -r 10 sub/x.txt
429 #
431 #
430 # if sub/ is no longer a subrepository in the working copy
432 # if sub/ is no longer a subrepository in the working copy
431 # parent revision.
433 # parent revision.
432 #
434 #
433 # However, it can of course also allow things that would have
435 # However, it can of course also allow things that would have
434 # been rejected before, such as the above cat command if sub/
436 # been rejected before, such as the above cat command if sub/
435 # is a subrepository now, but was a normal directory before.
437 # is a subrepository now, but was a normal directory before.
436 # The old path auditor would have rejected by mistake since it
438 # The old path auditor would have rejected by mistake since it
437 # panics when it sees sub/.hg/.
439 # panics when it sees sub/.hg/.
438 #
440 #
439 # All in all, checking against the working copy seems sensible
441 # All in all, checking against the working copy seems sensible
440 # since we want to prevent access to nested repositories on
442 # since we want to prevent access to nested repositories on
441 # the filesystem *now*.
443 # the filesystem *now*.
442 ctx = self[None]
444 ctx = self[None]
443 parts = util.splitpath(subpath)
445 parts = util.splitpath(subpath)
444 while parts:
446 while parts:
445 prefix = '/'.join(parts)
447 prefix = '/'.join(parts)
446 if prefix in ctx.substate:
448 if prefix in ctx.substate:
447 if prefix == normsubpath:
449 if prefix == normsubpath:
448 return True
450 return True
449 else:
451 else:
450 sub = ctx.sub(prefix)
452 sub = ctx.sub(prefix)
451 return sub.checknested(subpath[len(prefix) + 1:])
453 return sub.checknested(subpath[len(prefix) + 1:])
452 else:
454 else:
453 parts.pop()
455 parts.pop()
454 return False
456 return False
455
457
456 def peer(self):
458 def peer(self):
457 return localpeer(self) # not cached to avoid reference cycle
459 return localpeer(self) # not cached to avoid reference cycle
458
460
459 def unfiltered(self):
461 def unfiltered(self):
460 """Return unfiltered version of the repository
462 """Return unfiltered version of the repository
461
463
462 Intended to be overwritten by filtered repo."""
464 Intended to be overwritten by filtered repo."""
463 return self
465 return self
464
466
465 def filtered(self, name):
467 def filtered(self, name):
466 """Return a filtered version of a repository"""
468 """Return a filtered version of a repository"""
467 # build a new class with the mixin and the current class
469 # build a new class with the mixin and the current class
468 # (possibly subclass of the repo)
470 # (possibly subclass of the repo)
469 class proxycls(repoview.repoview, self.unfiltered().__class__):
471 class proxycls(repoview.repoview, self.unfiltered().__class__):
470 pass
472 pass
471 return proxycls(self, name)
473 return proxycls(self, name)
472
474
473 @repofilecache('bookmarks', 'bookmarks.current')
475 @repofilecache('bookmarks', 'bookmarks.current')
474 def _bookmarks(self):
476 def _bookmarks(self):
475 return bookmarks.bmstore(self)
477 return bookmarks.bmstore(self)
476
478
477 @property
479 @property
478 def _activebookmark(self):
480 def _activebookmark(self):
479 return self._bookmarks.active
481 return self._bookmarks.active
480
482
481 def bookmarkheads(self, bookmark):
483 def bookmarkheads(self, bookmark):
482 name = bookmark.split('@', 1)[0]
484 name = bookmark.split('@', 1)[0]
483 heads = []
485 heads = []
484 for mark, n in self._bookmarks.iteritems():
486 for mark, n in self._bookmarks.iteritems():
485 if mark.split('@', 1)[0] == name:
487 if mark.split('@', 1)[0] == name:
486 heads.append(n)
488 heads.append(n)
487 return heads
489 return heads
488
490
489 # _phaserevs and _phasesets depend on changelog. what we need is to
491 # _phaserevs and _phasesets depend on changelog. what we need is to
490 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
492 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
491 # can't be easily expressed in filecache mechanism.
493 # can't be easily expressed in filecache mechanism.
492 @storecache('phaseroots', '00changelog.i')
494 @storecache('phaseroots', '00changelog.i')
493 def _phasecache(self):
495 def _phasecache(self):
494 return phases.phasecache(self, self._phasedefaults)
496 return phases.phasecache(self, self._phasedefaults)
495
497
496 @storecache('obsstore')
498 @storecache('obsstore')
497 def obsstore(self):
499 def obsstore(self):
498 # read default format for new obsstore.
500 # read default format for new obsstore.
499 # developer config: format.obsstore-version
501 # developer config: format.obsstore-version
500 defaultformat = self.ui.configint('format', 'obsstore-version', None)
502 defaultformat = self.ui.configint('format', 'obsstore-version', None)
501 # rely on obsstore class default when possible.
503 # rely on obsstore class default when possible.
502 kwargs = {}
504 kwargs = {}
503 if defaultformat is not None:
505 if defaultformat is not None:
504 kwargs['defaultformat'] = defaultformat
506 kwargs['defaultformat'] = defaultformat
505 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
507 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
506 store = obsolete.obsstore(self.svfs, readonly=readonly,
508 store = obsolete.obsstore(self.svfs, readonly=readonly,
507 **kwargs)
509 **kwargs)
508 if store and readonly:
510 if store and readonly:
509 self.ui.warn(
511 self.ui.warn(
510 _('obsolete feature not enabled but %i markers found!\n')
512 _('obsolete feature not enabled but %i markers found!\n')
511 % len(list(store)))
513 % len(list(store)))
512 return store
514 return store
513
515
514 @storecache('00changelog.i')
516 @storecache('00changelog.i')
515 def changelog(self):
517 def changelog(self):
516 c = changelog.changelog(self.svfs)
518 c = changelog.changelog(self.svfs)
517 if txnutil.mayhavepending(self.root):
519 if txnutil.mayhavepending(self.root):
518 c.readpending('00changelog.i.a')
520 c.readpending('00changelog.i.a')
519 return c
521 return c
520
522
521 def _constructmanifest(self):
523 def _constructmanifest(self):
522 # This is a temporary function while we migrate from manifest to
524 # This is a temporary function while we migrate from manifest to
523 # manifestlog. It allows bundlerepo and unionrepo to intercept the
525 # manifestlog. It allows bundlerepo and unionrepo to intercept the
524 # manifest creation.
526 # manifest creation.
525 return manifest.manifestrevlog(self.svfs)
527 return manifest.manifestrevlog(self.svfs)
526
528
527 @storecache('00manifest.i')
529 @storecache('00manifest.i')
528 def manifestlog(self):
530 def manifestlog(self):
529 return manifest.manifestlog(self.svfs, self)
531 return manifest.manifestlog(self.svfs, self)
530
532
531 @repofilecache('dirstate')
533 @repofilecache('dirstate')
532 def dirstate(self):
534 def dirstate(self):
533 return dirstate.dirstate(self.vfs, self.ui, self.root,
535 return dirstate.dirstate(self.vfs, self.ui, self.root,
534 self._dirstatevalidate)
536 self._dirstatevalidate)
535
537
536 def _dirstatevalidate(self, node):
538 def _dirstatevalidate(self, node):
537 try:
539 try:
538 self.changelog.rev(node)
540 self.changelog.rev(node)
539 return node
541 return node
540 except error.LookupError:
542 except error.LookupError:
541 if not self._dirstatevalidatewarned:
543 if not self._dirstatevalidatewarned:
542 self._dirstatevalidatewarned = True
544 self._dirstatevalidatewarned = True
543 self.ui.warn(_("warning: ignoring unknown"
545 self.ui.warn(_("warning: ignoring unknown"
544 " working parent %s!\n") % short(node))
546 " working parent %s!\n") % short(node))
545 return nullid
547 return nullid
546
548
547 def __getitem__(self, changeid):
549 def __getitem__(self, changeid):
548 if changeid is None or changeid == wdirrev:
550 if changeid is None or changeid == wdirrev:
549 return context.workingctx(self)
551 return context.workingctx(self)
550 if isinstance(changeid, slice):
552 if isinstance(changeid, slice):
551 return [context.changectx(self, i)
553 return [context.changectx(self, i)
552 for i in xrange(*changeid.indices(len(self)))
554 for i in xrange(*changeid.indices(len(self)))
553 if i not in self.changelog.filteredrevs]
555 if i not in self.changelog.filteredrevs]
554 return context.changectx(self, changeid)
556 return context.changectx(self, changeid)
555
557
556 def __contains__(self, changeid):
558 def __contains__(self, changeid):
557 try:
559 try:
558 self[changeid]
560 self[changeid]
559 return True
561 return True
560 except error.RepoLookupError:
562 except error.RepoLookupError:
561 return False
563 return False
562
564
563 def __nonzero__(self):
565 def __nonzero__(self):
564 return True
566 return True
565
567
566 def __len__(self):
568 def __len__(self):
567 return len(self.changelog)
569 return len(self.changelog)
568
570
569 def __iter__(self):
571 def __iter__(self):
570 return iter(self.changelog)
572 return iter(self.changelog)
571
573
572 def revs(self, expr, *args):
574 def revs(self, expr, *args):
573 '''Find revisions matching a revset.
575 '''Find revisions matching a revset.
574
576
575 The revset is specified as a string ``expr`` that may contain
577 The revset is specified as a string ``expr`` that may contain
576 %-formatting to escape certain types. See ``revsetlang.formatspec``.
578 %-formatting to escape certain types. See ``revsetlang.formatspec``.
577
579
578 Revset aliases from the configuration are not expanded. To expand
580 Revset aliases from the configuration are not expanded. To expand
579 user aliases, consider calling ``scmutil.revrange()`` or
581 user aliases, consider calling ``scmutil.revrange()`` or
580 ``repo.anyrevs([expr], user=True)``.
582 ``repo.anyrevs([expr], user=True)``.
581
583
582 Returns a revset.abstractsmartset, which is a list-like interface
584 Returns a revset.abstractsmartset, which is a list-like interface
583 that contains integer revisions.
585 that contains integer revisions.
584 '''
586 '''
585 expr = revsetlang.formatspec(expr, *args)
587 expr = revsetlang.formatspec(expr, *args)
586 m = revset.match(None, expr)
588 m = revset.match(None, expr)
587 return m(self)
589 return m(self)
588
590
589 def set(self, expr, *args):
591 def set(self, expr, *args):
590 '''Find revisions matching a revset and emit changectx instances.
592 '''Find revisions matching a revset and emit changectx instances.
591
593
592 This is a convenience wrapper around ``revs()`` that iterates the
594 This is a convenience wrapper around ``revs()`` that iterates the
593 result and is a generator of changectx instances.
595 result and is a generator of changectx instances.
594
596
595 Revset aliases from the configuration are not expanded. To expand
597 Revset aliases from the configuration are not expanded. To expand
596 user aliases, consider calling ``scmutil.revrange()``.
598 user aliases, consider calling ``scmutil.revrange()``.
597 '''
599 '''
598 for r in self.revs(expr, *args):
600 for r in self.revs(expr, *args):
599 yield self[r]
601 yield self[r]
600
602
601 def anyrevs(self, specs, user=False):
603 def anyrevs(self, specs, user=False):
602 '''Find revisions matching one of the given revsets.
604 '''Find revisions matching one of the given revsets.
603
605
604 Revset aliases from the configuration are not expanded by default. To
606 Revset aliases from the configuration are not expanded by default. To
605 expand user aliases, specify ``user=True``.
607 expand user aliases, specify ``user=True``.
606 '''
608 '''
607 if user:
609 if user:
608 m = revset.matchany(self.ui, specs, repo=self)
610 m = revset.matchany(self.ui, specs, repo=self)
609 else:
611 else:
610 m = revset.matchany(None, specs)
612 m = revset.matchany(None, specs)
611 return m(self)
613 return m(self)
612
614
613 def url(self):
615 def url(self):
614 return 'file:' + self.root
616 return 'file:' + self.root
615
617
616 def hook(self, name, throw=False, **args):
618 def hook(self, name, throw=False, **args):
617 """Call a hook, passing this repo instance.
619 """Call a hook, passing this repo instance.
618
620
619 This a convenience method to aid invoking hooks. Extensions likely
621 This a convenience method to aid invoking hooks. Extensions likely
620 won't call this unless they have registered a custom hook or are
622 won't call this unless they have registered a custom hook or are
621 replacing code that is expected to call a hook.
623 replacing code that is expected to call a hook.
622 """
624 """
623 return hook.hook(self.ui, self, name, throw, **args)
625 return hook.hook(self.ui, self, name, throw, **args)
624
626
625 @unfilteredmethod
627 @unfilteredmethod
626 def _tag(self, names, node, message, local, user, date, extra=None,
628 def _tag(self, names, node, message, local, user, date, extra=None,
627 editor=False):
629 editor=False):
628 if isinstance(names, str):
630 if isinstance(names, str):
629 names = (names,)
631 names = (names,)
630
632
631 branches = self.branchmap()
633 branches = self.branchmap()
632 for name in names:
634 for name in names:
633 self.hook('pretag', throw=True, node=hex(node), tag=name,
635 self.hook('pretag', throw=True, node=hex(node), tag=name,
634 local=local)
636 local=local)
635 if name in branches:
637 if name in branches:
636 self.ui.warn(_("warning: tag %s conflicts with existing"
638 self.ui.warn(_("warning: tag %s conflicts with existing"
637 " branch name\n") % name)
639 " branch name\n") % name)
638
640
639 def writetags(fp, names, munge, prevtags):
641 def writetags(fp, names, munge, prevtags):
640 fp.seek(0, 2)
642 fp.seek(0, 2)
641 if prevtags and prevtags[-1] != '\n':
643 if prevtags and prevtags[-1] != '\n':
642 fp.write('\n')
644 fp.write('\n')
643 for name in names:
645 for name in names:
644 if munge:
646 if munge:
645 m = munge(name)
647 m = munge(name)
646 else:
648 else:
647 m = name
649 m = name
648
650
649 if (self._tagscache.tagtypes and
651 if (self._tagscache.tagtypes and
650 name in self._tagscache.tagtypes):
652 name in self._tagscache.tagtypes):
651 old = self.tags().get(name, nullid)
653 old = self.tags().get(name, nullid)
652 fp.write('%s %s\n' % (hex(old), m))
654 fp.write('%s %s\n' % (hex(old), m))
653 fp.write('%s %s\n' % (hex(node), m))
655 fp.write('%s %s\n' % (hex(node), m))
654 fp.close()
656 fp.close()
655
657
656 prevtags = ''
658 prevtags = ''
657 if local:
659 if local:
658 try:
660 try:
659 fp = self.vfs('localtags', 'r+')
661 fp = self.vfs('localtags', 'r+')
660 except IOError:
662 except IOError:
661 fp = self.vfs('localtags', 'a')
663 fp = self.vfs('localtags', 'a')
662 else:
664 else:
663 prevtags = fp.read()
665 prevtags = fp.read()
664
666
665 # local tags are stored in the current charset
667 # local tags are stored in the current charset
666 writetags(fp, names, None, prevtags)
668 writetags(fp, names, None, prevtags)
667 for name in names:
669 for name in names:
668 self.hook('tag', node=hex(node), tag=name, local=local)
670 self.hook('tag', node=hex(node), tag=name, local=local)
669 return
671 return
670
672
671 try:
673 try:
672 fp = self.wfile('.hgtags', 'rb+')
674 fp = self.wfile('.hgtags', 'rb+')
673 except IOError as e:
675 except IOError as e:
674 if e.errno != errno.ENOENT:
676 if e.errno != errno.ENOENT:
675 raise
677 raise
676 fp = self.wfile('.hgtags', 'ab')
678 fp = self.wfile('.hgtags', 'ab')
677 else:
679 else:
678 prevtags = fp.read()
680 prevtags = fp.read()
679
681
680 # committed tags are stored in UTF-8
682 # committed tags are stored in UTF-8
681 writetags(fp, names, encoding.fromlocal, prevtags)
683 writetags(fp, names, encoding.fromlocal, prevtags)
682
684
683 fp.close()
685 fp.close()
684
686
685 self.invalidatecaches()
687 self.invalidatecaches()
686
688
687 if '.hgtags' not in self.dirstate:
689 if '.hgtags' not in self.dirstate:
688 self[None].add(['.hgtags'])
690 self[None].add(['.hgtags'])
689
691
690 m = matchmod.exact(self.root, '', ['.hgtags'])
692 m = matchmod.exact(self.root, '', ['.hgtags'])
691 tagnode = self.commit(message, user, date, extra=extra, match=m,
693 tagnode = self.commit(message, user, date, extra=extra, match=m,
692 editor=editor)
694 editor=editor)
693
695
694 for name in names:
696 for name in names:
695 self.hook('tag', node=hex(node), tag=name, local=local)
697 self.hook('tag', node=hex(node), tag=name, local=local)
696
698
697 return tagnode
699 return tagnode
698
700
699 def tag(self, names, node, message, local, user, date, editor=False):
701 def tag(self, names, node, message, local, user, date, editor=False):
700 '''tag a revision with one or more symbolic names.
702 '''tag a revision with one or more symbolic names.
701
703
702 names is a list of strings or, when adding a single tag, names may be a
704 names is a list of strings or, when adding a single tag, names may be a
703 string.
705 string.
704
706
705 if local is True, the tags are stored in a per-repository file.
707 if local is True, the tags are stored in a per-repository file.
706 otherwise, they are stored in the .hgtags file, and a new
708 otherwise, they are stored in the .hgtags file, and a new
707 changeset is committed with the change.
709 changeset is committed with the change.
708
710
709 keyword arguments:
711 keyword arguments:
710
712
711 local: whether to store tags in non-version-controlled file
713 local: whether to store tags in non-version-controlled file
712 (default False)
714 (default False)
713
715
714 message: commit message to use if committing
716 message: commit message to use if committing
715
717
716 user: name of user to use if committing
718 user: name of user to use if committing
717
719
718 date: date tuple to use if committing'''
720 date: date tuple to use if committing'''
719
721
720 if not local:
722 if not local:
721 m = matchmod.exact(self.root, '', ['.hgtags'])
723 m = matchmod.exact(self.root, '', ['.hgtags'])
722 if any(self.status(match=m, unknown=True, ignored=True)):
724 if any(self.status(match=m, unknown=True, ignored=True)):
723 raise error.Abort(_('working copy of .hgtags is changed'),
725 raise error.Abort(_('working copy of .hgtags is changed'),
724 hint=_('please commit .hgtags manually'))
726 hint=_('please commit .hgtags manually'))
725
727
726 self.tags() # instantiate the cache
728 self.tags() # instantiate the cache
727 self._tag(names, node, message, local, user, date, editor=editor)
729 self._tag(names, node, message, local, user, date, editor=editor)
728
730
729 @filteredpropertycache
731 @filteredpropertycache
730 def _tagscache(self):
732 def _tagscache(self):
731 '''Returns a tagscache object that contains various tags related
733 '''Returns a tagscache object that contains various tags related
732 caches.'''
734 caches.'''
733
735
734 # This simplifies its cache management by having one decorated
736 # This simplifies its cache management by having one decorated
735 # function (this one) and the rest simply fetch things from it.
737 # function (this one) and the rest simply fetch things from it.
736 class tagscache(object):
738 class tagscache(object):
737 def __init__(self):
739 def __init__(self):
738 # These two define the set of tags for this repository. tags
740 # These two define the set of tags for this repository. tags
739 # maps tag name to node; tagtypes maps tag name to 'global' or
741 # maps tag name to node; tagtypes maps tag name to 'global' or
740 # 'local'. (Global tags are defined by .hgtags across all
742 # 'local'. (Global tags are defined by .hgtags across all
741 # heads, and local tags are defined in .hg/localtags.)
743 # heads, and local tags are defined in .hg/localtags.)
742 # They constitute the in-memory cache of tags.
744 # They constitute the in-memory cache of tags.
743 self.tags = self.tagtypes = None
745 self.tags = self.tagtypes = None
744
746
745 self.nodetagscache = self.tagslist = None
747 self.nodetagscache = self.tagslist = None
746
748
747 cache = tagscache()
749 cache = tagscache()
748 cache.tags, cache.tagtypes = self._findtags()
750 cache.tags, cache.tagtypes = self._findtags()
749
751
750 return cache
752 return cache
751
753
752 def tags(self):
754 def tags(self):
753 '''return a mapping of tag to node'''
755 '''return a mapping of tag to node'''
754 t = {}
756 t = {}
755 if self.changelog.filteredrevs:
757 if self.changelog.filteredrevs:
756 tags, tt = self._findtags()
758 tags, tt = self._findtags()
757 else:
759 else:
758 tags = self._tagscache.tags
760 tags = self._tagscache.tags
759 for k, v in tags.iteritems():
761 for k, v in tags.iteritems():
760 try:
762 try:
761 # ignore tags to unknown nodes
763 # ignore tags to unknown nodes
762 self.changelog.rev(v)
764 self.changelog.rev(v)
763 t[k] = v
765 t[k] = v
764 except (error.LookupError, ValueError):
766 except (error.LookupError, ValueError):
765 pass
767 pass
766 return t
768 return t
767
769
768 def _findtags(self):
770 def _findtags(self):
769 '''Do the hard work of finding tags. Return a pair of dicts
771 '''Do the hard work of finding tags. Return a pair of dicts
770 (tags, tagtypes) where tags maps tag name to node, and tagtypes
772 (tags, tagtypes) where tags maps tag name to node, and tagtypes
771 maps tag name to a string like \'global\' or \'local\'.
773 maps tag name to a string like \'global\' or \'local\'.
772 Subclasses or extensions are free to add their own tags, but
774 Subclasses or extensions are free to add their own tags, but
773 should be aware that the returned dicts will be retained for the
775 should be aware that the returned dicts will be retained for the
774 duration of the localrepo object.'''
776 duration of the localrepo object.'''
775
777
776 # XXX what tagtype should subclasses/extensions use? Currently
778 # XXX what tagtype should subclasses/extensions use? Currently
777 # mq and bookmarks add tags, but do not set the tagtype at all.
779 # mq and bookmarks add tags, but do not set the tagtype at all.
778 # Should each extension invent its own tag type? Should there
780 # Should each extension invent its own tag type? Should there
779 # be one tagtype for all such "virtual" tags? Or is the status
781 # be one tagtype for all such "virtual" tags? Or is the status
780 # quo fine?
782 # quo fine?
781
783
782 alltags = {} # map tag name to (node, hist)
784 alltags = {} # map tag name to (node, hist)
783 tagtypes = {}
785 tagtypes = {}
784
786
785 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
787 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
786 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
788 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
787
789
788 # Build the return dicts. Have to re-encode tag names because
790 # Build the return dicts. Have to re-encode tag names because
789 # the tags module always uses UTF-8 (in order not to lose info
791 # the tags module always uses UTF-8 (in order not to lose info
790 # writing to the cache), but the rest of Mercurial wants them in
792 # writing to the cache), but the rest of Mercurial wants them in
791 # local encoding.
793 # local encoding.
792 tags = {}
794 tags = {}
793 for (name, (node, hist)) in alltags.iteritems():
795 for (name, (node, hist)) in alltags.iteritems():
794 if node != nullid:
796 if node != nullid:
795 tags[encoding.tolocal(name)] = node
797 tags[encoding.tolocal(name)] = node
796 tags['tip'] = self.changelog.tip()
798 tags['tip'] = self.changelog.tip()
797 tagtypes = dict([(encoding.tolocal(name), value)
799 tagtypes = dict([(encoding.tolocal(name), value)
798 for (name, value) in tagtypes.iteritems()])
800 for (name, value) in tagtypes.iteritems()])
799 return (tags, tagtypes)
801 return (tags, tagtypes)
800
802
801 def tagtype(self, tagname):
803 def tagtype(self, tagname):
802 '''
804 '''
803 return the type of the given tag. result can be:
805 return the type of the given tag. result can be:
804
806
805 'local' : a local tag
807 'local' : a local tag
806 'global' : a global tag
808 'global' : a global tag
807 None : tag does not exist
809 None : tag does not exist
808 '''
810 '''
809
811
810 return self._tagscache.tagtypes.get(tagname)
812 return self._tagscache.tagtypes.get(tagname)
811
813
812 def tagslist(self):
814 def tagslist(self):
813 '''return a list of tags ordered by revision'''
815 '''return a list of tags ordered by revision'''
814 if not self._tagscache.tagslist:
816 if not self._tagscache.tagslist:
815 l = []
817 l = []
816 for t, n in self.tags().iteritems():
818 for t, n in self.tags().iteritems():
817 l.append((self.changelog.rev(n), t, n))
819 l.append((self.changelog.rev(n), t, n))
818 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
820 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
819
821
820 return self._tagscache.tagslist
822 return self._tagscache.tagslist
821
823
822 def nodetags(self, node):
824 def nodetags(self, node):
823 '''return the tags associated with a node'''
825 '''return the tags associated with a node'''
824 if not self._tagscache.nodetagscache:
826 if not self._tagscache.nodetagscache:
825 nodetagscache = {}
827 nodetagscache = {}
826 for t, n in self._tagscache.tags.iteritems():
828 for t, n in self._tagscache.tags.iteritems():
827 nodetagscache.setdefault(n, []).append(t)
829 nodetagscache.setdefault(n, []).append(t)
828 for tags in nodetagscache.itervalues():
830 for tags in nodetagscache.itervalues():
829 tags.sort()
831 tags.sort()
830 self._tagscache.nodetagscache = nodetagscache
832 self._tagscache.nodetagscache = nodetagscache
831 return self._tagscache.nodetagscache.get(node, [])
833 return self._tagscache.nodetagscache.get(node, [])
832
834
833 def nodebookmarks(self, node):
835 def nodebookmarks(self, node):
834 """return the list of bookmarks pointing to the specified node"""
836 """return the list of bookmarks pointing to the specified node"""
835 marks = []
837 marks = []
836 for bookmark, n in self._bookmarks.iteritems():
838 for bookmark, n in self._bookmarks.iteritems():
837 if n == node:
839 if n == node:
838 marks.append(bookmark)
840 marks.append(bookmark)
839 return sorted(marks)
841 return sorted(marks)
840
842
841 def branchmap(self):
843 def branchmap(self):
842 '''returns a dictionary {branch: [branchheads]} with branchheads
844 '''returns a dictionary {branch: [branchheads]} with branchheads
843 ordered by increasing revision number'''
845 ordered by increasing revision number'''
844 branchmap.updatecache(self)
846 branchmap.updatecache(self)
845 return self._branchcaches[self.filtername]
847 return self._branchcaches[self.filtername]
846
848
847 @unfilteredmethod
849 @unfilteredmethod
848 def revbranchcache(self):
850 def revbranchcache(self):
849 if not self._revbranchcache:
851 if not self._revbranchcache:
850 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
852 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
851 return self._revbranchcache
853 return self._revbranchcache
852
854
853 def branchtip(self, branch, ignoremissing=False):
855 def branchtip(self, branch, ignoremissing=False):
854 '''return the tip node for a given branch
856 '''return the tip node for a given branch
855
857
856 If ignoremissing is True, then this method will not raise an error.
858 If ignoremissing is True, then this method will not raise an error.
857 This is helpful for callers that only expect None for a missing branch
859 This is helpful for callers that only expect None for a missing branch
858 (e.g. namespace).
860 (e.g. namespace).
859
861
860 '''
862 '''
861 try:
863 try:
862 return self.branchmap().branchtip(branch)
864 return self.branchmap().branchtip(branch)
863 except KeyError:
865 except KeyError:
864 if not ignoremissing:
866 if not ignoremissing:
865 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
867 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
866 else:
868 else:
867 pass
869 pass
868
870
869 def lookup(self, key):
871 def lookup(self, key):
870 return self[key].node()
872 return self[key].node()
871
873
872 def lookupbranch(self, key, remote=None):
874 def lookupbranch(self, key, remote=None):
873 repo = remote or self
875 repo = remote or self
874 if key in repo.branchmap():
876 if key in repo.branchmap():
875 return key
877 return key
876
878
877 repo = (remote and remote.local()) and remote or self
879 repo = (remote and remote.local()) and remote or self
878 return repo[key].branch()
880 return repo[key].branch()
879
881
880 def known(self, nodes):
882 def known(self, nodes):
881 cl = self.changelog
883 cl = self.changelog
882 nm = cl.nodemap
884 nm = cl.nodemap
883 filtered = cl.filteredrevs
885 filtered = cl.filteredrevs
884 result = []
886 result = []
885 for n in nodes:
887 for n in nodes:
886 r = nm.get(n)
888 r = nm.get(n)
887 resp = not (r is None or r in filtered)
889 resp = not (r is None or r in filtered)
888 result.append(resp)
890 result.append(resp)
889 return result
891 return result
890
892
891 def local(self):
893 def local(self):
892 return self
894 return self
893
895
894 def publishing(self):
896 def publishing(self):
895 # it's safe (and desirable) to trust the publish flag unconditionally
897 # it's safe (and desirable) to trust the publish flag unconditionally
896 # so that we don't finalize changes shared between users via ssh or nfs
898 # so that we don't finalize changes shared between users via ssh or nfs
897 return self.ui.configbool('phases', 'publish', True, untrusted=True)
899 return self.ui.configbool('phases', 'publish', True, untrusted=True)
898
900
    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

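    # Usage sketch (editorial addition): file() maps a tracked path to the
    # revlog that stores its history; the leading-slash strip above means
    # 'foo' and '/foo' name the same filelog:
    #
    #     flog = repo.file('foo')    # filelog.filelog over .hg/store data
    #     flog.cmp(node, data)       # same API _filecommit() uses below
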
    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it, because it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

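    # The filter patterns loaded above come from hgrc sections of the same
    # name. A classic example from the hgrc documentation (illustrative):
    #
    #     [encode]
    #     *.gz = pipe: gunzip
    #
    #     [decode]
    #     *.gz = gzip
    #
    # With that config, _filter(self._encodefilterpats, ...) pipes *.gz data
    # through gunzip on the way into the repository, and the decode filters
    # re-compress it on the way back out to the working directory.
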
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
        if 'x' in flags:
            self.wvfs.setflags(filename, False, True)
        return len(data)

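    # Flag handling sketch (editorial addition): ``flags`` may contain 'l'
    # (symlink) and/or 'x' (executable); an empty string writes a plain file:
    #
    #     repo.wwrite('script.sh', data, 'x')     # regular file, exec bit set
    #     repo.wwrite('link', 'target', 'l')      # symlink pointing at target
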
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

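    # Typical caller pattern (sketch; commit() below follows the same shape):
    # take the locks, open the transaction, close() on success, and always
    # release() so the journal is either finalized into 'undo' or rolled back:
    #
    #     wlock = lock = tr = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         tr = repo.transaction('my-operation')
    #         ...  # mutate the store
    #         tr.close()
    #     finally:
    #         lockmod.release(tr, lock, wlock)
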
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

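    # Resulting file layout (informative note): after _writejournal(desc),
    # .hg/journal.desc holds "%d\n%s\n" % (len(self), desc), for example:
    #
    #     1042
    #     commit
    #
    # (the revision count here is made up). _rollback() parses the renamed
    # copy, undo.desc, back into (oldlen, desc).
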
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

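    # This is the backend of the 'hg recover' command mentioned in the
    # "abandoned transaction found" hint in transaction() above. A scripted
    # call (sketch) looks like:
    #
    #     if repo.recover():
    #         pass  # journal rolled back and caches invalidated
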
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already-restored
            # backup
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

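    # Callback sketch (editorial addition; commit() registers its 'commit'
    # hook through this mechanism further down). The callable takes no
    # arguments and runs immediately when no lock is held:
    #
    #     def announce():
    #         repo.ui.status('all repository locks released\n')
    #     repo._afterlock(announce)
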
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

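    # Lock-ordering sketch, following the docstrings above ('wlock' strictly
    # before 'lock'); rollback() uses the same shape:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         ...  # touch both .hg and .hg/store
    #     finally:
    #         release(lock, wlock)
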
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

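    # Minimal scripted use of commit() (sketch; the message and user are
    # invented for illustration):
    #
    #     node = repo.commit(text='fix the frobnicator',
    #                        user='committer <c@example.com>')
    #     if node is None:
    #         pass  # nothing to commit and allowemptycommit was off
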
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent already has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

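# ---------------------------------------------------------------------------
# Editor's sketch (not part of this change): commitctx() is the low-level
# entry point that in-memory commits go through. A minimal sketch, assuming
# the 4.1-era Python 2 API; the repository path, file name and contents
# below are hypothetical.

from mercurial import ui as uimod, hg, context

def filectxfn(repo, memctx, path):
    # called back for every file named in the commit
    return context.memfilectx(repo, path, 'content of %s\n' % path)

repo = hg.repository(uimod.ui(), 'some-repo')        # hypothetical path
mctx = context.memctx(repo, (repo['tip'].node(), None), 'example commit',
                      ['hello.txt'], filectxfn, user='editor <e@example.org>')
newnode = repo.commitctx(mctx)
# ---------------------------------------------------------------------------
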
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes either to stay in memory (waiting for the next unlock) or to
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

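# ---------------------------------------------------------------------------
# Editor's sketch (not part of this change): driving walk() with a matcher.
# Assumes an open localrepository 'repo' and the 4.1-era match API; the
# pattern is illustrative.

from mercurial import match as matchmod

m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
for f in repo.walk(m, node='tip'):       # node=None would walk the working copy
    repo.ui.write('%s\n' % f)
# ---------------------------------------------------------------------------
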
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

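# ---------------------------------------------------------------------------
# Editor's sketch (not part of this change): the returned object is a
# scmutil.status tuple whose fields can be read by name (a sketch, assuming
# an open localrepository 'repo').

st = repo.status(ignored=True, clean=True, unknown=True)
for field in ('modified', 'added', 'removed', 'deleted',
              'unknown', 'ignored', 'clean'):
    repo.ui.write('%s: %s\n' % (field, ' '.join(getattr(st, field))))
# ---------------------------------------------------------------------------
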
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

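# ---------------------------------------------------------------------------
# Editor's sketch (not part of this change): listing every branch with its
# heads, newest first, as branchheads() returns them (assumes an open
# localrepository 'repo').

from mercurial.node import short

for name in repo.branchmap():
    heads = repo.branchheads(name, closed=True)
    repo.ui.write('%s: %s\n' % (name, ' '.join(short(h) for h in heads)))
# ---------------------------------------------------------------------------
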
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

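# ---------------------------------------------------------------------------
# Editor's note (not part of this change): between() samples the path from
# 'top' down to 'bottom' at exponentially growing distances (1, 2, 4, 8, ...
# steps), which lets the old discovery protocol narrow down a range in
# O(log n) rounds. A self-contained sketch of the same loop over a linear
# history where each revision's parent is simply rev - 1:

def sample_between(parent_of, top, bottom):
    n, picked, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        p = parent_of(n)
        if i == f:                  # keep nodes at power-of-two distances
            picked.append(n)
            f *= 2
        n = p
        i += 1
    return picked

# walking from 10 down to 0 keeps the nodes 1, 2, 4 and 8 steps away
assert sample_between(lambda r: r - 1 if r > 0 else None, 10, 0) == [9, 8, 6, 2]
# ---------------------------------------------------------------------------
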
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

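# ---------------------------------------------------------------------------
# Editor's sketch (not part of this change): how an extension typically adds
# a pre-push check. util.hooks.add() and the pushop attributes shown here
# are assumed from the 4.1-era API; the extension name and the limit are
# hypothetical.

from mercurial import error

def reposetup(ui, repo):
    def checkoutgoing(pushop):
        # pushop carries .repo, .remote and .outgoing
        if len(pushop.outgoing.missing) > 100:
            raise error.Abort('refusing to push more than 100 changesets')
    repo.prepushoutgoinghooks.add('myext', checkoutgoing)
# ---------------------------------------------------------------------------
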
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

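# ---------------------------------------------------------------------------
# Editor's sketch (not part of this change): pushkey() is what bookmark
# pushes go through. 'bookmarks' is a real pushkey namespace; old and new
# values are hex nodes, with '' standing for a missing key. The bookmark
# name is hypothetical.

new = repo['tip'].hex()
if repo.pushkey('bookmarks', 'my-bookmark', '', new):
    repo.ui.status('bookmark set\n')
# ---------------------------------------------------------------------------
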
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

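# ---------------------------------------------------------------------------
# Editor's note (not part of this change): aftertrans() returns a plain
# closure that only captures the small rename list, never the repo, so the
# transaction holding the callback cannot keep the repository alive through
# a reference cycle. A self-contained illustration:

import weakref

class FakeRepo(object):
    def __init__(self):
        renames = [('journal', 'undo')]
        def after():                    # captures 'renames' only, not self
            for src, dest in renames:
                pass                    # would rename src -> dest here
        self.callback = after

r = FakeRepo()
wr = weakref.ref(r)
del r
assert wr() is None                     # the closure did not pin the repo
# ---------------------------------------------------------------------------
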
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
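# ---------------------------------------------------------------------------
# Editor's sketch (not part of this change): wrapping this function from an
# extension, as the docstring invites. extensions.wrapfunction() is the
# standard mechanism; the config knob and requirement name are hypothetical.

from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    requirements = orig(repo)
    if repo.ui.configbool('myext', 'enabled', False):
        requirements.add('exp-myext-requirement')
    return requirements

def uisetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)
# ---------------------------------------------------------------------------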
@@ -1,388 +1,389 @@
  $ cat <<EOF >> $HGRCPATH
  > [ui]
  > color = always
  > [color]
  > mode = ansi
  > EOF
Terminfo codes compatibility fix
  $ echo "color.none=0" >> $HGRCPATH

  $ hg init repo1
  $ cd repo1
  $ mkdir a b a/1 b/1 b/2
  $ touch in_root a/in_a b/in_b a/1/in_a_1 b/1/in_b_1 b/2/in_b_2

hg status in repo root:

  $ hg status
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)

  $ hg status --color=debug
  [status.unknown|? ][status.unknown|a/1/in_a_1]
  [status.unknown|? ][status.unknown|a/in_a]
  [status.unknown|? ][status.unknown|b/1/in_b_1]
  [status.unknown|? ][status.unknown|b/2/in_b_2]
  [status.unknown|? ][status.unknown|b/in_b]
  [status.unknown|? ][status.unknown|in_root]

hg status with template
  $ hg status -T "{label('red', path)}\n" --color=debug
  [red|a/1/in_a_1]
  [red|a/in_a]
  [red|b/1/in_b_1]
  [red|b/2/in_b_2]
  [red|b/in_b]
  [red|in_root]

hg status . in repo root:

  $ hg status .
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)

  $ hg status --cwd a
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
  $ hg status --cwd a .
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
  $ hg status --cwd a ..
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../b/in_b\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)

  $ hg status --cwd b
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
  $ hg status --cwd b .
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
  $ hg status --cwd b ..
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../a/in_a\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_root\x1b[0m (esc)

  $ hg status --cwd a/1
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
  $ hg status --cwd a/1 .
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
  $ hg status --cwd a/1 ..
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_a\x1b[0m (esc)

  $ hg status --cwd b/1
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
  $ hg status --cwd b/1 .
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
  $ hg status --cwd b/1 ..
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)

  $ hg status --cwd b/2
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/1/in_a_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4ma/in_a\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/2/in_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4mb/in_b\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_root\x1b[0m (esc)
  $ hg status --cwd b/2 .
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
  $ hg status --cwd b/2 ..
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../1/in_b_1\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4min_b_2\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4m../in_b\x1b[0m (esc)

Make sure --color=never works
  $ hg status --color=never
  ? a/1/in_a_1
  ? a/in_a
  ? b/1/in_b_1
  ? b/2/in_b_2
  ? b/in_b
  ? in_root

Make sure ui.formatted=False works
  $ hg status --color=auto --config ui.formatted=False
  ? a/1/in_a_1
  ? a/in_a
  ? b/1/in_b_1
  ? b/2/in_b_2
  ? b/in_b
  ? in_root

  $ cd ..

  $ hg init repo2
  $ cd repo2
  $ touch modified removed deleted ignored
  $ echo "^ignored$" > .hgignore
  $ hg ci -A -m 'initial checkin'
  adding .hgignore
  adding deleted
  adding modified
  adding removed
  $ hg log --color=debug
  [log.changeset changeset.draft|changeset: 0:389aef86a55e]
  [log.tag|tag: tip]
  [log.user|user: test]
  [log.date|date: Thu Jan 01 00:00:00 1970 +0000]
  [log.summary|summary: initial checkin]

  $ hg log -Tcompact --color=debug
  [log.changeset changeset.draft|0][tip] [log.node|389aef86a55e] [log.date|1970-01-01 00:00 +0000] [log.user|test]
  [ui.note log.description|initial checkin]

Labels on empty strings should not be displayed, labels on custom
templates should be.

  $ hg log --color=debug -T '{label("my.label",author)}\n{label("skipped.label","")}'
  [my.label|test]
  $ touch modified added unknown ignored
  $ hg add added
  $ hg remove removed
  $ rm deleted

hg status:

  $ hg status
  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)

hg status modified added removed deleted unknown never-existed ignored:

  $ hg status modified added removed deleted unknown never-existed ignored
  never-existed: * (glob)
  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)

  $ hg copy modified copied

hg status -C:

  $ hg status -C
  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
  \x1b[0;0m modified\x1b[0m (esc)
  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)

hg status -A:

  $ hg status -A
  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
  \x1b[0;0m modified\x1b[0m (esc)
  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)
  \x1b[0;30;1mI \x1b[0m\x1b[0;30;1mignored\x1b[0m (esc)
  \x1b[0;0mC \x1b[0m\x1b[0;0m.hgignore\x1b[0m (esc)
  \x1b[0;0mC \x1b[0m\x1b[0;0mmodified\x1b[0m (esc)


hg status -A (with terminfo color):

#if tic

  $ mkdir "$TESTTMP/terminfo"
  $ TERMINFO="$TESTTMP/terminfo" tic "$TESTDIR/hgterm.ti"
  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo -A
  \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1madded\x1b[30m (esc)
  \x1b[30m\x1b[32m\x1b[1mA \x1b[30m\x1b[30m\x1b[32m\x1b[1mcopied\x1b[30m (esc)
  \x1b[30m\x1b[30m modified\x1b[30m (esc)
  \x1b[30m\x1b[31m\x1b[1mR \x1b[30m\x1b[30m\x1b[31m\x1b[1mremoved\x1b[30m (esc)
  \x1b[30m\x1b[36m\x1b[1m\x1b[4m! \x1b[30m\x1b[30m\x1b[36m\x1b[1m\x1b[4mdeleted\x1b[30m (esc)
  \x1b[30m\x1b[35m\x1b[1m\x1b[4m? \x1b[30m\x1b[30m\x1b[35m\x1b[1m\x1b[4munknown\x1b[30m (esc)
  \x1b[30m\x1b[30m\x1b[1mI \x1b[30m\x1b[30m\x1b[30m\x1b[1mignored\x1b[30m (esc)
  \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30m.hgignore\x1b[30m (esc)
  \x1b[30m\x1b[30mC \x1b[30m\x1b[30m\x1b[30mmodified\x1b[30m (esc)

The user can define effects with raw terminfo codes:

  $ cat <<EOF >> $HGRCPATH
  > # Completely bogus code for dim
  > terminfo.dim = \E[88m
  > # We can override what's in the terminfo database, too
  > terminfo.bold = \E[2m
  > EOF
  $ TERM=hgterm TERMINFO="$TESTTMP/terminfo" hg status --config color.mode=terminfo --config color.status.clean=dim -A
  \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2madded\x1b[30m (esc)
  \x1b[30m\x1b[32m\x1b[2mA \x1b[30m\x1b[30m\x1b[32m\x1b[2mcopied\x1b[30m (esc)
  \x1b[30m\x1b[30m modified\x1b[30m (esc)
  \x1b[30m\x1b[31m\x1b[2mR \x1b[30m\x1b[30m\x1b[31m\x1b[2mremoved\x1b[30m (esc)
  \x1b[30m\x1b[36m\x1b[2m\x1b[4m! \x1b[30m\x1b[30m\x1b[36m\x1b[2m\x1b[4mdeleted\x1b[30m (esc)
  \x1b[30m\x1b[35m\x1b[2m\x1b[4m? \x1b[30m\x1b[30m\x1b[35m\x1b[2m\x1b[4munknown\x1b[30m (esc)
  \x1b[30m\x1b[30m\x1b[2mI \x1b[30m\x1b[30m\x1b[30m\x1b[2mignored\x1b[30m (esc)
  \x1b[30m\x1b[88mC \x1b[30m\x1b[30m\x1b[88m.hgignore\x1b[30m (esc)
  \x1b[30m\x1b[88mC \x1b[30m\x1b[30m\x1b[88mmodified\x1b[30m (esc)

#endif


  $ echo "^ignoreddir$" > .hgignore
  $ mkdir ignoreddir
  $ touch ignoreddir/file

hg status ignoreddir/file:

  $ hg status ignoreddir/file

hg status -i ignoreddir/file:

  $ hg status -i ignoreddir/file
  \x1b[0;30;1mI \x1b[0m\x1b[0;30;1mignoreddir/file\x1b[0m (esc)
  $ cd ..

check 'status -q' and some combinations

  $ hg init repo3
  $ cd repo3
  $ touch modified removed deleted ignored
  $ echo "^ignored$" > .hgignore
  $ hg commit -A -m 'initial checkin'
  adding .hgignore
  adding deleted
  adding modified
  adding removed
  $ touch added unknown ignored
  $ hg add added
  $ echo "test" >> modified
  $ hg remove removed
  $ rm deleted
  $ hg copy modified copied

test unknown color

  $ hg --config color.status.modified=periwinkle status
  ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
  ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
  ignoring unknown color/effect 'periwinkle' (configured in color.status.modified)
  M modified
  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1madded\x1b[0m (esc)
  \x1b[0;32;1mA \x1b[0m\x1b[0;32;1mcopied\x1b[0m (esc)
  \x1b[0;31;1mR \x1b[0m\x1b[0;31;1mremoved\x1b[0m (esc)
  \x1b[0;36;1;4m! \x1b[0m\x1b[0;36;1;4mdeleted\x1b[0m (esc)
  \x1b[0;35;1;4m? \x1b[0m\x1b[0;35;1;4munknown\x1b[0m (esc)

Run status with 2 different flags.
Check if the result is the same or different.
If the result is not as expected, print an error.

  $ assert() {
  > hg status $1 > ../a
  > hg status $2 > ../b
  > if diff ../a ../b > /dev/null; then
  > out=0
  > else
  > out=1
  > fi
  > if [ $3 -eq 0 ]; then
  > df="same"
  > else
  > df="different"
  > fi
  > if [ $out -ne $3 ]; then
  > echo "Error on $1 and $2, should be $df."
  > fi
  > }

assert flag1 flag2 [0-same | 1-different]

  $ assert "-q" "-mard" 0
  $ assert "-A" "-marduicC" 0
  $ assert "-qA" "-mardcC" 0
  $ assert "-qAui" "-A" 0
  $ assert "-qAu" "-marducC" 0
  $ assert "-qAi" "-mardicC" 0
  $ assert "-qu" "-u" 0
  $ assert "-q" "-u" 1
  $ assert "-m" "-a" 1
  $ assert "-r" "-d" 1
  $ cd ..

test 'resolve -l'

  $ hg init repo4
  $ cd repo4
  $ echo "file a" > a
  $ echo "file b" > b
  $ hg add a b
  $ hg commit -m "initial"
  $ echo "file a change 1" > a
  $ echo "file b change 1" > b
  $ hg commit -m "head 1"
  $ hg update 0
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo "file a change 2" > a
  $ echo "file b change 2" > b
  $ hg commit -m "head 2"
  created new head
  $ hg merge
  merging a
  merging b
  warning: conflicts while merging a! (edit, then use 'hg resolve --mark')
  warning: conflicts while merging b! (edit, then use 'hg resolve --mark')
  0 files updated, 0 files merged, 0 files removed, 2 files unresolved
  use 'hg resolve' to retry unresolved file merges or 'hg update -C .' to abandon
  [1]
  $ hg resolve -m b

hg resolve with one unresolved, one resolved:

  $ hg resolve -l
  \x1b[0;31;1mU \x1b[0m\x1b[0;31;1ma\x1b[0m (esc)
  \x1b[0;32;1mR \x1b[0m\x1b[0;32;1mb\x1b[0m (esc)

color coding of error message with current availability of curses

  $ hg unknowncommand > /dev/null
  hg: unknown command 'unknowncommand'
  [255]

color coding of error message without curses

  $ echo 'raise ImportError' > curses.py
  $ PYTHONPATH=`pwd`:$PYTHONPATH hg unknowncommand > /dev/null
  hg: unknown command 'unknowncommand'
  [255]

  $ cd ..