localrepo: move repo creation logic out of localrepository.__init__ (API)...
Gregory Szorc
r39584:7ce9dea3 default
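The one functional change in this diff swaps direct construction of localrepository for the module-level factory. A minimal sketch of the two calling conventions, using the names from the demo() code below:

    # before: repo creation was requested via a constructor flag
    repo = localrepo.localrepository(baseui, tmpdir, True)
    # after: creation logic lives behind the localrepo.instance() factory
    repo = localrepo.instance(baseui, tmpdir, create=True)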
hgext/keyword.py
@@ -1,815 +1,815 @@
# keyword.py - $Keyword$ expansion for Mercurial
#
# Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# $Id$
#
# Keyword expansion hack against the grain of a Distributed SCM
#
# There are many good reasons why this is not needed in a distributed
# SCM, still it may be useful in very small projects based on single
# files (like LaTeX packages), that are mostly addressed to an
# audience not running a version control system.
#
# For in-depth discussion refer to
# <https://mercurial-scm.org/wiki/KeywordPlan>.
#
# Keyword expansion is based on Mercurial's changeset template mappings.
#
# Binary files are not touched.
#
# Files to act upon/ignore are specified in the [keyword] section.
# Customized keyword template mappings in the [keywordmaps] section.
#
# Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.

'''expand keywords in tracked files

This extension expands RCS/CVS-like or self-customized $Keywords$ in
tracked text files selected by your configuration.

Keywords are only expanded in local repositories and not stored in the
change history. The mechanism can be regarded as a convenience for the
current user or for archive distribution.

Keywords expand to the changeset data pertaining to the latest change
relative to the working directory parent of each file.

Configuration is done in the [keyword], [keywordset] and [keywordmaps]
sections of hgrc files.

Example::

    [keyword]
    # expand keywords in every python file except those matching "x*"
    **.py =
    x* = ignore

    [keywordset]
    # prefer svn- over cvs-like default keywordmaps
    svn = True

.. note::

   The more specific you are in your filename patterns the less you
   lose speed in huge repositories.

For [keywordmaps] template mapping and expansion demonstration and
control run :hg:`kwdemo`. See :hg:`help templates` for a list of
available templates and filters.

Three additional date template filters are provided:

:``utcdate``:    "2006/09/18 15:13:13"
:``svnutcdate``: "2006-09-18 15:13:13Z"
:``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"

The default template mappings (view with :hg:`kwdemo -d`) can be
replaced with customized keywords and templates. Again, run
:hg:`kwdemo` to control the results of your configuration changes.

Before changing/disabling active keywords, you must run :hg:`kwshrink`
to avoid storing expanded keywords in the change history.

To force expansion after enabling it, or a configuration change, run
:hg:`kwexpand`.

Expansions spanning more than one line and incremental expansions,
like CVS' $Log$, are not supported. A keyword template map "Log =
{desc}" expands to the first line of the changeset description.
'''


from __future__ import absolute_import

import os
import re
import weakref

from mercurial.i18n import _
from mercurial.hgweb import webcommands

from mercurial import (
    cmdutil,
    context,
    dispatch,
    error,
    extensions,
    filelog,
    localrepo,
    logcmdutil,
    match,
    patch,
    pathutil,
    pycompat,
    registrar,
    scmutil,
    templatefilters,
    templateutil,
    util,
)
from mercurial.utils import (
    dateutil,
    stringutil,
)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
                ' outgoing push tip verify convert email glog')

# webcommands that do not act on keywords
nokwwebcommands = ('annotate changeset rev filediff diff comparison')

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = ('merge kwexpand kwshrink record qrecord resolve transplant'
              ' unshelve rebase graft backout histedit fetch')

# names of extensions using dorecord
recordextensions = 'record'

colortable = {
    'kwfiles.enabled': 'green bold',
    'kwfiles.deleted': 'cyan bold underline',
    'kwfiles.enabledunknown': 'green',
    'kwfiles.ignored': 'bold',
    'kwfiles.ignoredunknown': 'none'
}

templatefilter = registrar.templatefilter()

configtable = {}
configitem = registrar.configitem(configtable)

configitem('keywordset', 'svn',
    default=False,
)
# date like in cvs' $Date
@templatefilter('utcdate', intype=templateutil.date)
def utcdate(date):
    '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
    '''
    dateformat = '%Y/%m/%d %H:%M:%S'
    return dateutil.datestr((date[0], 0), dateformat)
# date like in svn's $Date
@templatefilter('svnisodate', intype=templateutil.date)
def svnisodate(date):
    '''Date. Returns a date in this format: "2009-08-18 13:00:13
    +0200 (Tue, 18 Aug 2009)".
    '''
    return dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
# date like in svn's $Id
@templatefilter('svnutcdate', intype=templateutil.date)
def svnutcdate(date):
    '''Date. Returns a UTC-date in this format: "2009-08-18
    11:00:13Z".
    '''
    dateformat = '%Y-%m-%d %H:%M:%SZ'
    return dateutil.datestr((date[0], 0), dateformat)

# make keyword tools accessible
kwtools = {'hgcmd': ''}

def _defaultkwmaps(ui):
    '''Returns default keywordmaps according to keywordset configuration.'''
    templates = {
        'Revision': '{node|short}',
        'Author': '{author|user}',
    }
    kwsets = ({
        'Date': '{date|utcdate}',
        'RCSfile': '{file|basename},v',
        'RCSFile': '{file|basename},v', # kept for backwards compatibility
                                        # with hg-keyword
        'Source': '{root}/{file},v',
        'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
        'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
    }, {
        'Date': '{date|svnisodate}',
        'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
        'LastChangedRevision': '{node|short}',
        'LastChangedBy': '{author|user}',
        'LastChangedDate': '{date|svnisodate}',
    })
    templates.update(kwsets[ui.configbool('keywordset', 'svn')])
    return templates

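# For illustration (sample values taken from the filter docstrings above):
# with the default cvs-like keywordset an unexpanded '$Date$' becomes
# '$Date: 2009/08/18 11:00:13 $', while 'keywordset.svn = True' yields the
# svn-style '$Date: 2009-08-18 13:00:13 +0200 (Tue, 18 Aug 2009) $'.
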
def _shrinktext(text, subfunc):
    '''Helper for keyword expansion removal in text.
    Depending on subfunc also returns number of substitutions.'''
    return subfunc(br'$\1$', text)

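# A minimal sketch of the two call styles (hypothetical 'Id' expansion):
#   _shrinktext(text, kwt.rekwexp.sub)  turns '$Id: f.py,v 1e2f3a4b ... $'
#                                       back into '$Id$'
#   _shrinktext(text, kwt.rekwexp.subn) does the same and also returns the
#                                       number of substitutions made
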
def _preselect(wstatus, changed):
    '''Retrieves modified and added files from a working directory state
    and returns the subset of each contained in given changed files
    retrieved from a change context.'''
    modified = [f for f in wstatus.modified if f in changed]
    added = [f for f in wstatus.added if f in changed]
    return modified, added


class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        self.ui = ui
        self._repo = weakref.ref(repo)
        self.match = match.match(repo.root, '', [], inc, exc)
        self.restrict = kwtools['hgcmd'] in restricted.split()
        self.postcommit = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict(kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @property
    def repo(self):
        return self._repo()

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(stringutil.reescape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        return re.compile(br'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)

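    # For example, with the default templates the escaped alternation
    # includes 'Revision' and 'Author', so rekw matches a literal
    # '$Revision$' while rekwexp matches an already expanded
    # '$Revision: 1e2f3a4b5c6d $' (single line, no embedded '$').
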
    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.'''
        def kwsub(mobj):
            kw = mobj.group(1)
            ct = logcmdutil.maketemplater(self.ui, self.repo,
                                          self.templates[kw])
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        if (not self.restrict and self.match(path)
            and not stringutil.binary(data)):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

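    # Sketch of the expansion path (hypothetical values): reading a tracked
    # file containing '$Revision$' renders the 'Revision' template
    # '{node|short}' against the linked changectx, so the data handed back
    # to the caller contains something like '$Revision: 1e2f3a4b5c6d $'.
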
    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion but are not symbolic links.'''
        return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.postcommit: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        if self.restrict or rekw:
            re_kw = self.rekw
        else:
            re_kw = self.rekwexp
        if expand:
            msg = _('overwriting %s expanding keywords\n')
        else:
            msg = _('overwriting %s shrinking keywords\n')
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if stringutil.binary(data):
                continue
            if expand:
                parents = ctx.parents()
                if lookup:
                    ctx = self.linkctx(f, mf[f])
                elif self.restrict and len(parents) > 1:
                    # merge commit
                    # in case of conflict f is in modified state during
                    # merge, even if f does not differ from f in parent
                    for p in parents:
                        if f in p and not p[f].cmp(ctx[f]):
                            ctx = p[f].changectx()
                            break
                data, found = self.substitute(data, f, ctx, re_kw.subn)
            elif self.restrict:
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                fp = self.repo.wvfs(f, "wb", atomictemp=True)
                fp.write(data)
                fp.close()
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.postcommit:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not stringutil.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not stringutil.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        if self.restrict:
            return self.shrink(fname, data)
        return data

class kwfilelog(filelog.filelog):
    '''
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        self.kwt = kwt
        self.path = path

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        data = super(kwfilelog, self).read(node)
        if self.renamed(node):
            return data
        return self.kwt.expand(self.path, node, data)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, text)

def _status(ui, repo, wctx, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if kwt:
        opts = pycompat.byteskwargs(opts)
        return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
                           unknown=opts.get('unknown') or opts.get('all'))
    if ui.configitems('keyword'):
        raise error.Abort(_('[keyword] patterns cannot match'))
    raise error.Abort(_('no [keyword] patterns configured'))

def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.'''
    wctx = repo[None]
    if len(wctx.parents()) > 1:
        raise error.Abort(_('outstanding uncommitted merge'))
    kwt = getattr(repo, '_keywordkwt', None)
    with repo.wlock():
        status = _status(ui, repo, wctx, kwt, *pats, **opts)
        if status.modified or status.added or status.removed or status.deleted:
            raise error.Abort(_('outstanding uncommitted changes'))
        kwt.overwrite(wctx, status.clean, True, expand)

@command('kwdemo',
         [('d', 'default', None, _('show default keyword template maps')),
          ('f', 'rcfile', '',
           _('read maps from rcfile'), _('FILE'))],
         _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
         optionalrepo=True)
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    tmpdir = pycompat.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    if repo is None:
        baseui = ui
    else:
        baseui = repo.baseui
-   repo = localrepo.localrepository(baseui, tmpdir, True)
+   repo = localrepo.instance(baseui, tmpdir, create=True)
    ui.setconfig('keyword', fn, '', 'keyword')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn, 'keyword')

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get(r'rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get(r'default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get(r'rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = '[keywordmaps]\n%s\n' % '\n'.join(args)
            repo.vfs.write('hgrc', rcmaps)
            ui.readconfig(repo.vfs.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get(r'default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v, 'keyword')
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        if uikwmaps:
            kwmaps = dict(uikwmaps)
        else:
            kwmaps = _defaultkwmaps(ui)

    uisetup(ui)
    reposetup(ui, repo)
    ui.write(('[extensions]\nkeyword =\n'))
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wvfs.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    with repo.wlock():
        repo.dirstate.setbranch('demobranch')
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '', 'keyword')
    msg = _('hg keyword configuration and expansion example')
    ui.note(("hg ci -m '%s'\n" % msg))
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    repo.wvfs.rmtree(repo.root)

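# Typical invocations, per the command table and docstring above:
#   hg kwdemo            - show current configuration and an expansion sample
#   hg kwdemo -d         - show the default template maps instead
#   hg kwdemo -f maps.rc - read additional template maps from an rcfile
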
@command('kwexpand',
         cmdutil.walkopts,
         _('hg kwexpand [OPTION]... [FILE]...'),
         inferrepo=True)
def expand(ui, repo, *pats, **opts):
    '''expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to True
    _kwfwrite(ui, repo, True, *pats, **opts)

@command('kwfiles',
         [('A', 'all', None, _('show keyword status flags of all files')),
          ('i', 'ignore', None, _('show files excluded from expansion')),
          ('u', 'unknown', None, _('only show unknown (not tracked) files')),
         ] + cmdutil.walkopts,
         _('hg kwfiles [OPTION]... [FILE]...'),
         inferrepo=True)
def files(ui, repo, *pats, **opts):
    '''show files configured for keyword expansion

    List which files in the working directory are matched by the
    [keyword] configuration patterns.

    Useful to prevent inadvertent keyword expansion and to speed up
    execution by including only files that are actual candidates for
    expansion.

    See :hg:`help keyword` on how to construct patterns both for
    inclusion and exclusion of files.

    With -A/--all and -v/--verbose the codes used to show the status
    of files are::

      K = keyword expansion candidate
      k = keyword expansion candidate (not tracked)
      I = ignored
      i = ignored (not tracked)
    '''
    kwt = getattr(repo, '_keywordkwt', None)
    wctx = repo[None]
    status = _status(ui, repo, wctx, kwt, *pats, **opts)
    if pats:
        cwd = repo.getcwd()
    else:
        cwd = ''
    files = []
    opts = pycompat.byteskwargs(opts)
    if not opts.get('unknown') or opts.get('all'):
        files = sorted(status.modified + status.added + status.clean)
    kwfiles = kwt.iskwfile(files, wctx)
    kwdeleted = kwt.iskwfile(status.deleted, wctx)
    kwunknown = kwt.iskwfile(status.unknown, wctx)
    if not opts.get('ignore') or opts.get('all'):
        showfiles = kwfiles, kwdeleted, kwunknown
    else:
        showfiles = [], [], []
    if opts.get('all') or opts.get('ignore'):
        showfiles += ([f for f in files if f not in kwfiles],
                      [f for f in status.unknown if f not in kwunknown])
    kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
    kwstates = zip(kwlabels, pycompat.bytestr('K!kIi'), showfiles)
    fm = ui.formatter('kwfiles', opts)
    fmt = '%.0s%s\n'
    if opts.get('all') or ui.verbose:
        fmt = '%s %s\n'
    for kwstate, char, filenames in kwstates:
        label = 'kwfiles.' + kwstate
        for f in filenames:
            fm.startitem()
            fm.data(kwstatus=char, path=f)
            fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
    fm.end()

@command('kwshrink',
         cmdutil.walkopts,
         _('hg kwshrink [OPTION]... [FILE]...'),
         inferrepo=True)
def shrink(ui, repo, *pats, **opts):
    '''revert expanded keywords in the working directory

    Must be run before changing/disabling active keywords.

    kwshrink refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to False
    _kwfwrite(ui, repo, False, *pats, **opts)

# monkeypatches

def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
    '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
    rejects or conflicts due to expanded keywords in working dir.'''
    orig(self, ui, gp, backend, store, eolmode)
    kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
    if kwt:
        # shrink keywords read from working dir
        self.lines = kwt.shrinklines(self.fname, self.lines)

def kwdiff(orig, repo, *args, **kwargs):
    '''Monkeypatch patch.diff to avoid expansion.'''
    kwt = getattr(repo, '_keywordkwt', None)
    if kwt:
        restrict = kwt.restrict
        kwt.restrict = True
    try:
        for chunk in orig(repo, *args, **kwargs):
            yield chunk
    finally:
        if kwt:
            kwt.restrict = restrict

def kwweb_skip(orig, web):
    '''Wraps webcommands.x turning off keyword expansion.'''
    kwt = getattr(web.repo, '_keywordkwt', None)
    if kwt:
        origmatch = kwt.match
        kwt.match = util.never
    try:
        for chunk in orig(web):
            yield chunk
    finally:
        if kwt:
            kwt.match = origmatch

def kw_amend(orig, ui, repo, old, extra, pats, opts):
    '''Wraps cmdutil.amend expanding keywords after amend.'''
    kwt = getattr(repo, '_keywordkwt', None)
    if kwt is None:
        return orig(ui, repo, old, extra, pats, opts)
    with repo.wlock():
        kwt.postcommit = True
        newid = orig(ui, repo, old, extra, pats, opts)
        if newid != old.node():
            ctx = repo[newid]
            kwt.restrict = True
            kwt.overwrite(ctx, ctx.files(), False, True)
            kwt.restrict = False
        return newid

def kw_copy(orig, ui, repo, pats, opts, rename=False):
    '''Wraps cmdutil.copy so that copy/rename destinations do not
    contain expanded keywords.
    Note that the source of a regular file destination may also be a
    symlink:
    hg cp sym x                -> x is symlink
    cp sym x; hg cp -A sym x   -> x is file (maybe expanded keywords)
    For the latter we have to follow the symlink to find out whether its
    target is configured for expansion and we therefore must unexpand the
    keywords in the destination.'''
    kwt = getattr(repo, '_keywordkwt', None)
    if kwt is None:
        return orig(ui, repo, pats, opts, rename)
    with repo.wlock():
        orig(ui, repo, pats, opts, rename)
        if opts.get('dry_run'):
            return
        wctx = repo[None]
        cwd = repo.getcwd()

        def haskwsource(dest):
            '''Returns true if dest is a regular file and configured for
            expansion or a symlink which points to a file configured for
            expansion. '''
            source = repo.dirstate.copied(dest)
            if 'l' in wctx.flags(source):
                source = pathutil.canonpath(repo.root, cwd,
                                            os.path.realpath(source))
            return kwt.match(source)

        candidates = [f for f in repo.dirstate.copies() if
                      'l' not in wctx.flags(f) and haskwsource(f)]
        kwt.overwrite(wctx, candidates, False, False)

def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
    '''Wraps record.dorecord expanding keywords after recording.'''
    kwt = getattr(repo, '_keywordkwt', None)
    if kwt is None:
        return orig(ui, repo, commitfunc, *pats, **opts)
    with repo.wlock():
        # record returns 0 even when nothing has changed
        # therefore compare nodes before and after
        kwt.postcommit = True
        ctx = repo['.']
        wstatus = ctx.status()
        ret = orig(ui, repo, commitfunc, *pats, **opts)
        recctx = repo['.']
        if ctx != recctx:
            modified, added = _preselect(wstatus, recctx.files())
            kwt.restrict = False
            kwt.overwrite(recctx, modified, False, True)
            kwt.overwrite(recctx, added, False, True, True)
            kwt.restrict = True
        return ret

def kwfilectx_cmp(orig, self, fctx):
    if fctx._customcmp:
        return fctx.cmp(self)
    kwt = getattr(self._repo, '_keywordkwt', None)
    if kwt is None:
        return orig(self, fctx)
    # keyword affects data size, comparing wdir and filelog size does
    # not make sense
    if (fctx._filenode is None and
        (self._repo._encodefilterpats or
         kwt.match(fctx.path()) and 'l' not in fctx.flags() or
         self.size() - 4 == fctx.size()) or
        self.size() == fctx.size()):
        return self._filelog.cmp(self._filenode, fctx.data())
    return True

def uisetup(ui):
    ''' Monkeypatches dispatch._parse to retrieve user command.
    Overrides file method to return kwfilelog instead of filelog
    if file matches user configuration.
    Wraps commit to overwrite configured files with updated
    keyword substitutions.
    Monkeypatches patch and webcommands.'''

    def kwdispatch_parse(orig, ui, args):
        '''Monkeypatch dispatch._parse to obtain running hg command.'''
        cmd, func, args, options, cmdoptions = orig(ui, args)
        kwtools['hgcmd'] = cmd
        return cmd, func, args, options, cmdoptions

    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)

    extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
    extensions.wrapfunction(patch, 'diff', kwdiff)
    extensions.wrapfunction(cmdutil, 'amend', kw_amend)
    extensions.wrapfunction(cmdutil, 'copy', kw_copy)
    extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
    for c in nokwwebcommands.split():
        extensions.wrapfunction(webcommands, c, kwweb_skip)

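# For reference, a minimal hgrc that activates these hooks (patterns from
# the module docstring's example):
#   [extensions]
#   keyword =
#   [keyword]
#   **.py =
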
def reposetup(ui, repo):
    '''Sets up repo as kwrepo for keyword substitution.'''

    try:
        if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
            or '.hg' in util.splitpath(repo.root)
            or repo._url.startswith('bundle:')):
            return
    except AttributeError:
        pass

    inc, exc = [], ['.hg*']
    for pat, opt in ui.configitems('keyword'):
        if opt != 'ignore':
            inc.append(pat)
        else:
            exc.append(pat)
    if not inc:
        return

    kwt = kwtemplater(ui, repo, inc, exc)

    class kwrepo(repo.__class__):
        def file(self, f):
            if f[0] == '/':
                f = f[1:]
            return kwfilelog(self.svfs, kwt, f)

        def wread(self, filename):
            data = super(kwrepo, self).wread(filename)
            return kwt.wread(filename, data)

        def commit(self, *args, **opts):
            # use custom commitctx for user commands
            # other extensions can still wrap repo.commitctx directly
            self.commitctx = self.kwcommitctx
            try:
                return super(kwrepo, self).commit(*args, **opts)
            finally:
                del self.commitctx

        def kwcommitctx(self, ctx, error=False):
            n = super(kwrepo, self).commitctx(ctx, error)
            # no lock needed, only called from repo.commit() which already locks
            if not kwt.postcommit:
                restrict = kwt.restrict
                kwt.restrict = True
                kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
                              False, True)
                kwt.restrict = restrict
            return n

        def rollback(self, dryrun=False, force=False):
            with self.wlock():
                origrestrict = kwt.restrict
                try:
                    if not dryrun:
                        changed = self['.'].files()
                    ret = super(kwrepo, self).rollback(dryrun, force)
                    if not dryrun:
                        ctx = self['.']
                        modified, added = _preselect(ctx.status(), changed)
                        kwt.restrict = False
                        kwt.overwrite(ctx, modified, True, True)
                        kwt.overwrite(ctx, added, True, False)
                    return ret
                finally:
                    kwt.restrict = origrestrict

    repo.__class__ = kwrepo
    repo._keywordkwt = kwt
mercurial/localrepo.py
@@ -1,2449 +1,2472 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

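# Illustrative sketch (not part of this changeset): a hypothetical helper
# that reports which filecache-backed properties were actually loaded,
# without forcing any of them to load.
def _exampleloadedcaches(repo):
    return [name for name in repo.unfiltered()._filecache
            if isfilecached(repo, name)[1]]
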
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

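# Illustrative sketch (not part of this changeset): a hypothetical mixin
# showing the decorator's effect; any method wrapped this way receives the
# unfiltered repository even when invoked through a filtered repoview.
class _examplecaches(object):
    @unfilteredmethod
    def rebuildallcaches(self):
        # ``self`` is the unfiltered repository here, so the invalidation
        # below reaches revisions hidden by any filter.
        self.invalidatecaches()  # hypothetical target method
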
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

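# Illustrative sketch (not part of this changeset): how callers drive the
# executor protocol implemented above; ``peer`` is assumed to be any object
# returned by repo.peer().
def _examplelookup(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand('lookup', {'key': 'tip'})
        # The local executor resolves futures immediately; remote
        # implementations may batch work until sendcommands() is called.
        return f.result()
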
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

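# Illustrative sketch (not part of this changeset): an extension would
# typically register one of these functions from its uisetup(), e.g. to
# advertise a hypothetical requirement it knows how to open.
def _examplefeaturesetup(ui, supported):
    supported.add('exp-example-feature')  # hypothetical requirement name
#
# ... and in the extension's uisetup(ui):
#     localrepo.featuresetupfuncs.add(_examplefeaturesetup)
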
@interfaceutil.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

-    def __init__(self, baseui, path, create=False, intents=None):
+    def __init__(self, baseui, path, intents=None):
+        """Create a new local repository instance.
+
+        Most callers should use ``hg.repository()`` or ``localrepo.instance()``
+        for obtaining a new repository object.
+        """
+
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are of the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
-            if create:
-                self.requirements = newreporequirements(self.ui)
-
-                if not self.wvfs.exists():
-                    self.wvfs.makedirs()
-                self.vfs.makedir(notindexed=True)
-
-                if 'store' in self.requirements:
-                    self.vfs.mkdir("store")
-
-                    # create an invalid changelog
-                    self.vfs.append(
-                        "00changelog.i",
-                        '\0\0\0\2' # represents revlogv2
-                        ' dummy changelog to prevent using the old repo layout'
-                    )
-            else:
-                try:
-                    self.vfs.stat()
-                except OSError as inst:
-                    if inst.errno != errno.ENOENT:
-                        raise
-                    raise error.RepoError(_("repository %s not found") % path)
-        elif create:
-            raise error.RepoError(_("repository %s already exists") % path)
+            try:
+                self.vfs.stat()
+            except OSError as inst:
+                if inst.errno != errno.ENOENT:
+                    raise
+                raise error.RepoError(_("repository %s not found") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
-        if create:
-            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

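    # Illustrative sketch (not part of this changeset): per the docstring at
    # the top of __init__, repository objects come from the factory helpers
    # rather than direct instantiation; the paths below are placeholders.
    @staticmethod
    def _exampleopen(ui):
        from mercurial import hg
        repo = hg.repository(ui, path='/path/to/existing/repo')
        newrepo = hg.repository(ui, '/path/to/new/repo', create=True)
        return repo, newrepo
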
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
                                              'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            self.svfs.options['generaldelta'] = True
        maxchainlen = None
        if sparserevlog:
            maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen', maxchainlen)
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

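    # Illustrative sketch (not part of this changeset): one repository seen
    # through the standard repoview filters.
    @staticmethod
    def _exampleviews(repo):
        unfi = repo.unfiltered()            # everything, including hidden revs
        visible = repo.filtered('visible')  # hides obsolete/hidden changesets
        served = repo.filtered('served')    # what is exposed to peers
        return unfi, visible, served
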
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node
        is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

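    # Illustrative sketch (not part of this changeset) of the indexing
    # protocol implemented above; ``node`` is assumed to be a binary node id.
    @staticmethod
    def _exampleindexing(repo, node):
        wctx = repo[None]   # working-directory context
        pctx = repo['.']    # changectx of the working directory parent
        ctx = repo[node]    # changectx for a node or revision number
        last = repo[-3:]    # slices yield lists of changectx, skipping
                            # filtered revisions
        return wctx, pctx, ctx, last
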
    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

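    # Usage sketch (illustrative only; the repository path and revset
    # expressions are hypothetical): the three query entry points above
    # differ in what they return and in how aliases are handled.
    #
    #     from mercurial import hg, ui as uimod
    #     repo = hg.repository(uimod.ui.load(), b'/path/to/repo')
    #     # revs(): integer revisions; %s is escaped via formatspec
    #     for rev in repo.revs(b'branch(%s) and not obsolete()', b'default'):
    #         print(rev)
    #     # set(): changectx instances instead of integers
    #     for ctx in repo.set(b'heads(all())'):
    #         print(ctx.hex())
    #     # anyrevs(): several specs at once, expanding user aliases
    #     revs = repo.anyrevs([b'tip', b'.^'], user=True)
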
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

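    # Sketch (hypothetical hook name): an extension that registered a custom
    # hook would fire it through this wrapper; users attach commands to it in
    # their hgrc, e.g. "[hooks] myhook = echo fired". The repo instance is
    # passed along automatically.
    #
    #     repo.hook('myhook', throw=False, extra='value')
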
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like 'global' or 'local'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

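    # Usage sketch (illustrative only; assumes an existing ``repo`` and a
    # hypothetical tag name): the tag accessors above are all backed by the
    # lazily-built _tagscache.
    #
    #     node = repo.tags()[b'tip']        # node a tag points to
    #     kind = repo.tagtype(b'v1.0')      # 'global', 'local' or None
    #     for name, n in repo.tagslist():   # (tag, node) ordered by revision
    #         print(name, repo.nodetags(n))
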
    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

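    # Usage sketch (illustrative only, assuming an existing ``repo``):
    # branchmap() gives all heads per branch, branchtip() just the tip node.
    #
    #     heads = repo.branchmap()[b'default']   # raises KeyError if absent
    #     tip = repo.branchtip(b'default', ignoremissing=True)  # None instead
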
    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

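    # Sketch of the filter machinery used by wread()/wwrite() above: [encode]
    # patterns transform data on its way into the store, [decode] patterns on
    # its way back to the working directory. A line-ending setup might look
    # like this (commands are illustrative):
    #
    #     [encode]
    #     **.txt = tempfile: dos2unix -n INFILE OUTFILE
    #     [decode]
    #     **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #     data = repo.wread(b'notes.txt')         # run through [encode]
    #     repo.wwrite(b'notes.txt', data, b'')    # run through [decode]
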
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        # <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        # "-R": tag is removed,
        # "+A": tag is added,
        # "-M": tag is moved (old value),
        # "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes, if tr.addfilegenerator (via
                # dirstate.write or so) wasn't invoked while the
                # transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

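    # Usage sketch (illustrative only; the transaction name is arbitrary):
    # a transaction requires the store lock and nests inside a running one.
    # The transaction object is a context manager, so an exception aborts it
    # and triggers the txnabort hook wired above.
    #
    #     with repo.lock():
    #         with repo.transaction(b'my-operation') as tr:
    #             ...  # mutate the store; rolled back on exception
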
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

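    # Usage sketch (illustrative only): recover() backs 'hg recover' and
    # cleans up an interrupted transaction; rollback() backs the deprecated
    # 'hg rollback' and undoes the last transaction.
    #
    #     if repo.svfs.exists(b'journal'):
    #         repo.recover()
    #     repo.rollback(dryrun=True)   # only report what would be undone
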
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

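    # Usage sketch (illustrative only): a maintenance command such as
    # 'hg debugupdatecaches' warms every known cache by calling this with
    # full=True outside any transaction.
    #
    #     repo.updatecaches(full=True)
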
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

1709 def invalidateall(self):
1694 def invalidateall(self):
1710 '''Fully invalidates both store and non-store parts, causing the
1695 '''Fully invalidates both store and non-store parts, causing the
1711 subsequent operation to reread any outside changes.'''
1696 subsequent operation to reread any outside changes.'''
1712 # extension should hook this to invalidate its caches
1697 # extension should hook this to invalidate its caches
1713 self.invalidate()
1698 self.invalidate()
1714 self.invalidatedirstate()
1699 self.invalidatedirstate()
1715
1700
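The comment above ("extension should hook this to invalidate its caches") suggests wrapping. A hedged sketch of such a hook using extensions.wrapfunction; the _mycache attribute is an invented, extension-owned cache, not part of this patch:

from mercurial import extensions, localrepo

def _invalidateall(orig, self):
    self._mycache = None  # hypothetical cache kept by a third-party extension
    return orig(self)

def extsetup(ui):
    extensions.wrapfunction(localrepo.localrepository, 'invalidateall',
                            _invalidateall)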
1716 @unfilteredmethod
1701 @unfilteredmethod
1717 def _refreshfilecachestats(self, tr):
1702 def _refreshfilecachestats(self, tr):
1718 """Reload stats of cached files so that they are flagged as valid"""
1703 """Reload stats of cached files so that they are flagged as valid"""
1719 for k, ce in self._filecache.items():
1704 for k, ce in self._filecache.items():
1720 k = pycompat.sysstr(k)
1705 k = pycompat.sysstr(k)
1721 if k == r'dirstate' or k not in self.__dict__:
1706 if k == r'dirstate' or k not in self.__dict__:
1722 continue
1707 continue
1723 ce.refresh()
1708 ce.refresh()
1724
1709
1725 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1710 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1726 inheritchecker=None, parentenvvar=None):
1711 inheritchecker=None, parentenvvar=None):
1727 parentlock = None
1712 parentlock = None
1728 # the contents of parentenvvar are used by the underlying lock to
1713 # the contents of parentenvvar are used by the underlying lock to
1729 # determine whether it can be inherited
1714 # determine whether it can be inherited
1730 if parentenvvar is not None:
1715 if parentenvvar is not None:
1731 parentlock = encoding.environ.get(parentenvvar)
1716 parentlock = encoding.environ.get(parentenvvar)
1732
1717
1733 timeout = 0
1718 timeout = 0
1734 warntimeout = 0
1719 warntimeout = 0
1735 if wait:
1720 if wait:
1736 timeout = self.ui.configint("ui", "timeout")
1721 timeout = self.ui.configint("ui", "timeout")
1737 warntimeout = self.ui.configint("ui", "timeout.warn")
1722 warntimeout = self.ui.configint("ui", "timeout.warn")
1738 # internal config: ui.signal-safe-lock
1723 # internal config: ui.signal-safe-lock
1739 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1724 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1740
1725
1741 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1726 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1742 releasefn=releasefn,
1727 releasefn=releasefn,
1743 acquirefn=acquirefn, desc=desc,
1728 acquirefn=acquirefn, desc=desc,
1744 inheritchecker=inheritchecker,
1729 inheritchecker=inheritchecker,
1745 parentlock=parentlock,
1730 parentlock=parentlock,
1746 signalsafe=signalsafe)
1731 signalsafe=signalsafe)
1747 return l
1732 return l
1748
1733
1749 def _afterlock(self, callback):
1734 def _afterlock(self, callback):
1750 """add a callback to be run when the repository is fully unlocked
1735 """add a callback to be run when the repository is fully unlocked
1751
1736
1752 The callback will be executed when the outermost lock is released
1737 The callback will be executed when the outermost lock is released
1753 (with wlock being higher level than 'lock')."""
1738 (with wlock being higher level than 'lock')."""
1754 for ref in (self._wlockref, self._lockref):
1739 for ref in (self._wlockref, self._lockref):
1755 l = ref and ref()
1740 l = ref and ref()
1756 if l and l.held:
1741 if l and l.held:
1757 l.postrelease.append(callback)
1742 l.postrelease.append(callback)
1758 break
1743 break
1759 else: # no lock has been found.
1744 else: # no lock has been found.
1760 callback()
1745 callback()
1761
1746
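A hedged usage sketch of _afterlock (the status message is invented): queue work to run once the outermost lock is released; when no lock is held at all, the callback fires immediately:

def _notify():
    repo.ui.status(b'repository fully unlocked\n')

repo._afterlock(_notify)  # runs right away if neither wlock nor lock is held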
1762 def lock(self, wait=True):
1747 def lock(self, wait=True):
1763 '''Lock the repository store (.hg/store) and return a weak reference
1748 '''Lock the repository store (.hg/store) and return a weak reference
1764 to the lock. Use this before modifying the store (e.g. committing or
1749 to the lock. Use this before modifying the store (e.g. committing or
1765 stripping). If you are opening a transaction, get a lock as well.
1750 stripping). If you are opening a transaction, get a lock as well.
1766
1751
1767 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1752 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1768 'wlock' first to avoid a deadlock hazard.'''
1753 'wlock' first to avoid a deadlock hazard.'''
1769 l = self._currentlock(self._lockref)
1754 l = self._currentlock(self._lockref)
1770 if l is not None:
1755 if l is not None:
1771 l.lock()
1756 l.lock()
1772 return l
1757 return l
1773
1758
1774 l = self._lock(self.svfs, "lock", wait, None,
1759 l = self._lock(self.svfs, "lock", wait, None,
1775 self.invalidate, _('repository %s') % self.origroot)
1760 self.invalidate, _('repository %s') % self.origroot)
1776 self._lockref = weakref.ref(l)
1761 self._lockref = weakref.ref(l)
1777 return l
1762 return l
1778
1763
1779 def _wlockchecktransaction(self):
1764 def _wlockchecktransaction(self):
1780 if self.currenttransaction() is not None:
1765 if self.currenttransaction() is not None:
1781 raise error.LockInheritanceContractViolation(
1766 raise error.LockInheritanceContractViolation(
1782 'wlock cannot be inherited in the middle of a transaction')
1767 'wlock cannot be inherited in the middle of a transaction')
1783
1768
1784 def wlock(self, wait=True):
1769 def wlock(self, wait=True):
1785 '''Lock the non-store parts of the repository (everything under
1770 '''Lock the non-store parts of the repository (everything under
1786 .hg except .hg/store) and return a weak reference to the lock.
1771 .hg except .hg/store) and return a weak reference to the lock.
1787
1772
1788 Use this before modifying files in .hg.
1773 Use this before modifying files in .hg.
1789
1774
1790 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1775 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1791 'wlock' first to avoid a deadlock hazard.'''
1776 'wlock' first to avoid a deadlock hazard.'''
1792 l = self._wlockref and self._wlockref()
1777 l = self._wlockref and self._wlockref()
1793 if l is not None and l.held:
1778 if l is not None and l.held:
1794 l.lock()
1779 l.lock()
1795 return l
1780 return l
1796
1781
1797 # We do not need to check for non-waiting lock acquisition. Such
1782 # We do not need to check for non-waiting lock acquisition. Such
1798 # an acquisition would not cause a deadlock, as it would just fail.
1783 # an acquisition would not cause a deadlock, as it would just fail.
1799 if wait and (self.ui.configbool('devel', 'all-warnings')
1784 if wait and (self.ui.configbool('devel', 'all-warnings')
1800 or self.ui.configbool('devel', 'check-locks')):
1785 or self.ui.configbool('devel', 'check-locks')):
1801 if self._currentlock(self._lockref) is not None:
1786 if self._currentlock(self._lockref) is not None:
1802 self.ui.develwarn('"wlock" acquired after "lock"')
1787 self.ui.develwarn('"wlock" acquired after "lock"')
1803
1788
1804 def unlock():
1789 def unlock():
1805 if self.dirstate.pendingparentchange():
1790 if self.dirstate.pendingparentchange():
1806 self.dirstate.invalidate()
1791 self.dirstate.invalidate()
1807 else:
1792 else:
1808 self.dirstate.write(None)
1793 self.dirstate.write(None)
1809
1794
1810 self._filecache['dirstate'].refresh()
1795 self._filecache['dirstate'].refresh()
1811
1796
1812 l = self._lock(self.vfs, "wlock", wait, unlock,
1797 l = self._lock(self.vfs, "wlock", wait, unlock,
1813 self.invalidatedirstate, _('working directory of %s') %
1798 self.invalidatedirstate, _('working directory of %s') %
1814 self.origroot,
1799 self.origroot,
1815 inheritchecker=self._wlockchecktransaction,
1800 inheritchecker=self._wlockchecktransaction,
1816 parentenvvar='HG_WLOCK_LOCKER')
1801 parentenvvar='HG_WLOCK_LOCKER')
1817 self._wlockref = weakref.ref(l)
1802 self._wlockref = weakref.ref(l)
1818 return l
1803 return l
1819
1804
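Both docstrings above prescribe the same ordering: wlock before lock. A minimal sketch honoring it; with_repo_locks is an invented helper, while lockmod.release is the release utility already used in this file:

from mercurial import lock as lockmod

def with_repo_locks(repo, fn):
    wlock = lck = None
    try:
        wlock = repo.wlock()  # non-store lock first, per the docstring
        lck = repo.lock()     # store lock second
        return fn(repo)
    finally:
        lockmod.release(lck, wlock)  # release in reverse acquisition order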
1820 def _currentlock(self, lockref):
1805 def _currentlock(self, lockref):
1821 """Returns the lock if it's held, or None if it's not."""
1806 """Returns the lock if it's held, or None if it's not."""
1822 if lockref is None:
1807 if lockref is None:
1823 return None
1808 return None
1824 l = lockref()
1809 l = lockref()
1825 if l is None or not l.held:
1810 if l is None or not l.held:
1826 return None
1811 return None
1827 return l
1812 return l
1828
1813
1829 def currentwlock(self):
1814 def currentwlock(self):
1830 """Returns the wlock if it's held, or None if it's not."""
1815 """Returns the wlock if it's held, or None if it's not."""
1831 return self._currentlock(self._wlockref)
1816 return self._currentlock(self._wlockref)
1832
1817
1833 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1818 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1834 """
1819 """
1835 commit an individual file as part of a larger transaction
1820 commit an individual file as part of a larger transaction
1836 """
1821 """
1837
1822
1838 fname = fctx.path()
1823 fname = fctx.path()
1839 fparent1 = manifest1.get(fname, nullid)
1824 fparent1 = manifest1.get(fname, nullid)
1840 fparent2 = manifest2.get(fname, nullid)
1825 fparent2 = manifest2.get(fname, nullid)
1841 if isinstance(fctx, context.filectx):
1826 if isinstance(fctx, context.filectx):
1842 node = fctx.filenode()
1827 node = fctx.filenode()
1843 if node in [fparent1, fparent2]:
1828 if node in [fparent1, fparent2]:
1844 self.ui.debug('reusing %s filelog entry\n' % fname)
1829 self.ui.debug('reusing %s filelog entry\n' % fname)
1845 if manifest1.flags(fname) != fctx.flags():
1830 if manifest1.flags(fname) != fctx.flags():
1846 changelist.append(fname)
1831 changelist.append(fname)
1847 return node
1832 return node
1848
1833
1849 flog = self.file(fname)
1834 flog = self.file(fname)
1850 meta = {}
1835 meta = {}
1851 copy = fctx.renamed()
1836 copy = fctx.renamed()
1852 if copy and copy[0] != fname:
1837 if copy and copy[0] != fname:
1853 # Mark the new revision of this file as a copy of another
1838 # Mark the new revision of this file as a copy of another
1854 # file. This copy data will effectively act as a parent
1839 # file. This copy data will effectively act as a parent
1855 # of this new revision. If this is a merge, the first
1840 # of this new revision. If this is a merge, the first
1856 # parent will be the nullid (meaning "look up the copy data")
1841 # parent will be the nullid (meaning "look up the copy data")
1857 # and the second one will be the other parent. For example:
1842 # and the second one will be the other parent. For example:
1858 #
1843 #
1859 # 0 --- 1 --- 3 rev1 changes file foo
1844 # 0 --- 1 --- 3 rev1 changes file foo
1860 # \ / rev2 renames foo to bar and changes it
1845 # \ / rev2 renames foo to bar and changes it
1861 # \- 2 -/ rev3 should have bar with all changes and
1846 # \- 2 -/ rev3 should have bar with all changes and
1862 # should record that bar descends from
1847 # should record that bar descends from
1863 # bar in rev2 and foo in rev1
1848 # bar in rev2 and foo in rev1
1864 #
1849 #
1865 # this allows this merge to succeed:
1850 # this allows this merge to succeed:
1866 #
1851 #
1867 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1852 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1868 # \ / merging rev3 and rev4 should use bar@rev2
1853 # \ / merging rev3 and rev4 should use bar@rev2
1869 # \- 2 --- 4 as the merge base
1854 # \- 2 --- 4 as the merge base
1870 #
1855 #
1871
1856
1872 cfname = copy[0]
1857 cfname = copy[0]
1873 crev = manifest1.get(cfname)
1858 crev = manifest1.get(cfname)
1874 newfparent = fparent2
1859 newfparent = fparent2
1875
1860
1876 if manifest2: # branch merge
1861 if manifest2: # branch merge
1877 if fparent2 == nullid or crev is None: # copied on remote side
1862 if fparent2 == nullid or crev is None: # copied on remote side
1878 if cfname in manifest2:
1863 if cfname in manifest2:
1879 crev = manifest2[cfname]
1864 crev = manifest2[cfname]
1880 newfparent = fparent1
1865 newfparent = fparent1
1881
1866
1882 # Here, we used to search backwards through history to try to find
1867 # Here, we used to search backwards through history to try to find
1883 # where the file copy came from if the source of a copy was not in
1868 # where the file copy came from if the source of a copy was not in
1884 # the parent directory. However, this doesn't actually make sense to
1869 # the parent directory. However, this doesn't actually make sense to
1885 # do (what does a copy from something not in your working copy even
1870 # do (what does a copy from something not in your working copy even
1886 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1871 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1887 # the user that copy information was dropped, so if they didn't
1872 # the user that copy information was dropped, so if they didn't
1888 # expect this outcome it can be fixed, but this is the correct
1873 # expect this outcome it can be fixed, but this is the correct
1889 # behavior in this circumstance.
1874 # behavior in this circumstance.
1890
1875
1891 if crev:
1876 if crev:
1892 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1877 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1893 meta["copy"] = cfname
1878 meta["copy"] = cfname
1894 meta["copyrev"] = hex(crev)
1879 meta["copyrev"] = hex(crev)
1895 fparent1, fparent2 = nullid, newfparent
1880 fparent1, fparent2 = nullid, newfparent
1896 else:
1881 else:
1897 self.ui.warn(_("warning: can't find ancestor for '%s' "
1882 self.ui.warn(_("warning: can't find ancestor for '%s' "
1898 "copied from '%s'!\n") % (fname, cfname))
1883 "copied from '%s'!\n") % (fname, cfname))
1899
1884
1900 elif fparent1 == nullid:
1885 elif fparent1 == nullid:
1901 fparent1, fparent2 = fparent2, nullid
1886 fparent1, fparent2 = fparent2, nullid
1902 elif fparent2 != nullid:
1887 elif fparent2 != nullid:
1903 # is one parent an ancestor of the other?
1888 # is one parent an ancestor of the other?
1904 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1889 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1905 if fparent1 in fparentancestors:
1890 if fparent1 in fparentancestors:
1906 fparent1, fparent2 = fparent2, nullid
1891 fparent1, fparent2 = fparent2, nullid
1907 elif fparent2 in fparentancestors:
1892 elif fparent2 in fparentancestors:
1908 fparent2 = nullid
1893 fparent2 = nullid
1909
1894
1910 # is the file changed?
1895 # is the file changed?
1911 text = fctx.data()
1896 text = fctx.data()
1912 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1897 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1913 changelist.append(fname)
1898 changelist.append(fname)
1914 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1899 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1915 # are just the flags changed during merge?
1900 # are just the flags changed during merge?
1916 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1901 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1917 changelist.append(fname)
1902 changelist.append(fname)
1918
1903
1919 return fparent1
1904 return fparent1
1920
1905
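To make the copy bookkeeping above concrete, a hedged illustration of the metadata a rename produces; the path and node are invented placeholders:

meta = {
    "copy": "foo",        # path the file was renamed from
    "copyrev": "f" * 40,  # hex filelog node of the copy source (placeholder)
}
# fparent1, fparent2 = nullid, newfparent -- as set in the code above, so
# readers see nullid as the first parent and know to look up the copy source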
1921 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1906 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1922 """check for commit arguments that aren't committable"""
1907 """check for commit arguments that aren't committable"""
1923 if match.isexact() or match.prefix():
1908 if match.isexact() or match.prefix():
1924 matched = set(status.modified + status.added + status.removed)
1909 matched = set(status.modified + status.added + status.removed)
1925
1910
1926 for f in match.files():
1911 for f in match.files():
1927 f = self.dirstate.normalize(f)
1912 f = self.dirstate.normalize(f)
1928 if f == '.' or f in matched or f in wctx.substate:
1913 if f == '.' or f in matched or f in wctx.substate:
1929 continue
1914 continue
1930 if f in status.deleted:
1915 if f in status.deleted:
1931 fail(f, _('file not found!'))
1916 fail(f, _('file not found!'))
1932 if f in vdirs: # visited directory
1917 if f in vdirs: # visited directory
1933 d = f + '/'
1918 d = f + '/'
1934 for mf in matched:
1919 for mf in matched:
1935 if mf.startswith(d):
1920 if mf.startswith(d):
1936 break
1921 break
1937 else:
1922 else:
1938 fail(f, _("no match under directory!"))
1923 fail(f, _("no match under directory!"))
1939 elif f not in self.dirstate:
1924 elif f not in self.dirstate:
1940 fail(f, _("file not tracked!"))
1925 fail(f, _("file not tracked!"))
1941
1926
1942 @unfilteredmethod
1927 @unfilteredmethod
1943 def commit(self, text="", user=None, date=None, match=None, force=False,
1928 def commit(self, text="", user=None, date=None, match=None, force=False,
1944 editor=False, extra=None):
1929 editor=False, extra=None):
1945 """Add a new revision to current repository.
1930 """Add a new revision to current repository.
1946
1931
1947 Revision information is gathered from the working directory,
1932 Revision information is gathered from the working directory,
1948 match can be used to filter the committed files. If editor is
1933 match can be used to filter the committed files. If editor is
1949 supplied, it is called to get a commit message.
1934 supplied, it is called to get a commit message.
1950 """
1935 """
1951 if extra is None:
1936 if extra is None:
1952 extra = {}
1937 extra = {}
1953
1938
1954 def fail(f, msg):
1939 def fail(f, msg):
1955 raise error.Abort('%s: %s' % (f, msg))
1940 raise error.Abort('%s: %s' % (f, msg))
1956
1941
1957 if not match:
1942 if not match:
1958 match = matchmod.always(self.root, '')
1943 match = matchmod.always(self.root, '')
1959
1944
1960 if not force:
1945 if not force:
1961 vdirs = []
1946 vdirs = []
1962 match.explicitdir = vdirs.append
1947 match.explicitdir = vdirs.append
1963 match.bad = fail
1948 match.bad = fail
1964
1949
1965 wlock = lock = tr = None
1950 wlock = lock = tr = None
1966 try:
1951 try:
1967 wlock = self.wlock()
1952 wlock = self.wlock()
1968 lock = self.lock() # for recent changelog (see issue4368)
1953 lock = self.lock() # for recent changelog (see issue4368)
1969
1954
1970 wctx = self[None]
1955 wctx = self[None]
1971 merge = len(wctx.parents()) > 1
1956 merge = len(wctx.parents()) > 1
1972
1957
1973 if not force and merge and not match.always():
1958 if not force and merge and not match.always():
1974 raise error.Abort(_('cannot partially commit a merge '
1959 raise error.Abort(_('cannot partially commit a merge '
1975 '(do not specify files or patterns)'))
1960 '(do not specify files or patterns)'))
1976
1961
1977 status = self.status(match=match, clean=force)
1962 status = self.status(match=match, clean=force)
1978 if force:
1963 if force:
1979 status.modified.extend(status.clean) # mq may commit clean files
1964 status.modified.extend(status.clean) # mq may commit clean files
1980
1965
1981 # check subrepos
1966 # check subrepos
1982 subs, commitsubs, newstate = subrepoutil.precommit(
1967 subs, commitsubs, newstate = subrepoutil.precommit(
1983 self.ui, wctx, status, match, force=force)
1968 self.ui, wctx, status, match, force=force)
1984
1969
1985 # make sure all explicit patterns are matched
1970 # make sure all explicit patterns are matched
1986 if not force:
1971 if not force:
1987 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1972 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1988
1973
1989 cctx = context.workingcommitctx(self, status,
1974 cctx = context.workingcommitctx(self, status,
1990 text, user, date, extra)
1975 text, user, date, extra)
1991
1976
1992 # internal config: ui.allowemptycommit
1977 # internal config: ui.allowemptycommit
1993 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1978 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1994 or extra.get('close') or merge or cctx.files()
1979 or extra.get('close') or merge or cctx.files()
1995 or self.ui.configbool('ui', 'allowemptycommit'))
1980 or self.ui.configbool('ui', 'allowemptycommit'))
1996 if not allowemptycommit:
1981 if not allowemptycommit:
1997 return None
1982 return None
1998
1983
1999 if merge and cctx.deleted():
1984 if merge and cctx.deleted():
2000 raise error.Abort(_("cannot commit merge with missing files"))
1985 raise error.Abort(_("cannot commit merge with missing files"))
2001
1986
2002 ms = mergemod.mergestate.read(self)
1987 ms = mergemod.mergestate.read(self)
2003 mergeutil.checkunresolved(ms)
1988 mergeutil.checkunresolved(ms)
2004
1989
2005 if editor:
1990 if editor:
2006 cctx._text = editor(self, cctx, subs)
1991 cctx._text = editor(self, cctx, subs)
2007 edited = (text != cctx._text)
1992 edited = (text != cctx._text)
2008
1993
2009 # Save commit message in case this transaction gets rolled back
1994 # Save commit message in case this transaction gets rolled back
2010 # (e.g. by a pretxncommit hook). Leave the content alone on
1995 # (e.g. by a pretxncommit hook). Leave the content alone on
2011 # the assumption that the user will use the same editor again.
1996 # the assumption that the user will use the same editor again.
2012 msgfn = self.savecommitmessage(cctx._text)
1997 msgfn = self.savecommitmessage(cctx._text)
2013
1998
2014 # commit subs and write new state
1999 # commit subs and write new state
2015 if subs:
2000 if subs:
2016 for s in sorted(commitsubs):
2001 for s in sorted(commitsubs):
2017 sub = wctx.sub(s)
2002 sub = wctx.sub(s)
2018 self.ui.status(_('committing subrepository %s\n') %
2003 self.ui.status(_('committing subrepository %s\n') %
2019 subrepoutil.subrelpath(sub))
2004 subrepoutil.subrelpath(sub))
2020 sr = sub.commit(cctx._text, user, date)
2005 sr = sub.commit(cctx._text, user, date)
2021 newstate[s] = (newstate[s][0], sr)
2006 newstate[s] = (newstate[s][0], sr)
2022 subrepoutil.writestate(self, newstate)
2007 subrepoutil.writestate(self, newstate)
2023
2008
2024 p1, p2 = self.dirstate.parents()
2009 p1, p2 = self.dirstate.parents()
2025 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2010 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2026 try:
2011 try:
2027 self.hook("precommit", throw=True, parent1=hookp1,
2012 self.hook("precommit", throw=True, parent1=hookp1,
2028 parent2=hookp2)
2013 parent2=hookp2)
2029 tr = self.transaction('commit')
2014 tr = self.transaction('commit')
2030 ret = self.commitctx(cctx, True)
2015 ret = self.commitctx(cctx, True)
2031 except: # re-raises
2016 except: # re-raises
2032 if edited:
2017 if edited:
2033 self.ui.write(
2018 self.ui.write(
2034 _('note: commit message saved in %s\n') % msgfn)
2019 _('note: commit message saved in %s\n') % msgfn)
2035 raise
2020 raise
2036 # update bookmarks, dirstate and mergestate
2021 # update bookmarks, dirstate and mergestate
2037 bookmarks.update(self, [p1, p2], ret)
2022 bookmarks.update(self, [p1, p2], ret)
2038 cctx.markcommitted(ret)
2023 cctx.markcommitted(ret)
2039 ms.reset()
2024 ms.reset()
2040 tr.close()
2025 tr.close()
2041
2026
2042 finally:
2027 finally:
2043 lockmod.release(tr, lock, wlock)
2028 lockmod.release(tr, lock, wlock)
2044
2029
2045 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2030 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2046 # hack for commands that use a temporary commit (e.g. histedit):
2031 # hack for commands that use a temporary commit (e.g. histedit):
2047 # the temporary commit may have been stripped before the hook runs
2032 # the temporary commit may have been stripped before the hook runs
2048 if self.changelog.hasnode(ret):
2033 if self.changelog.hasnode(ret):
2049 self.hook("commit", node=node, parent1=parent1,
2034 self.hook("commit", node=node, parent1=parent1,
2050 parent2=parent2)
2035 parent2=parent2)
2051 self._afterlock(commithook)
2036 self._afterlock(commithook)
2052 return ret
2037 return ret
2053
2038
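A hedged usage sketch of this method (path, message and user are invented); commit() returns the new changeset node, or None when there is nothing to commit:

from mercurial import hg, ui as uimod

u = uimod.ui.load()
repo = hg.repository(u, b'.')
node = repo.commit(text=b'example message', user=b'alice <a@example.com>')
if node is None:
    u.status(b'nothing changed\n')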
2054 @unfilteredmethod
2039 @unfilteredmethod
2055 def commitctx(self, ctx, error=False):
2040 def commitctx(self, ctx, error=False):
2056 """Add a new revision to current repository.
2041 """Add a new revision to current repository.
2057 Revision information is passed via the context argument.
2042 Revision information is passed via the context argument.
2058
2043
2059 ctx.files() should list all files involved in this commit, i.e.
2044 ctx.files() should list all files involved in this commit, i.e.
2060 modified/added/removed files. On merge, it may be wider than the
2045 modified/added/removed files. On merge, it may be wider than the
2061 ctx.files() to be committed, since any file nodes derived directly
2046 ctx.files() to be committed, since any file nodes derived directly
2062 from p1 or p2 are excluded from the committed ctx.files().
2047 from p1 or p2 are excluded from the committed ctx.files().
2063 """
2048 """
2064
2049
2065 tr = None
2050 tr = None
2066 p1, p2 = ctx.p1(), ctx.p2()
2051 p1, p2 = ctx.p1(), ctx.p2()
2067 user = ctx.user()
2052 user = ctx.user()
2068
2053
2069 lock = self.lock()
2054 lock = self.lock()
2070 try:
2055 try:
2071 tr = self.transaction("commit")
2056 tr = self.transaction("commit")
2072 trp = weakref.proxy(tr)
2057 trp = weakref.proxy(tr)
2073
2058
2074 if ctx.manifestnode():
2059 if ctx.manifestnode():
2075 # reuse an existing manifest revision
2060 # reuse an existing manifest revision
2076 self.ui.debug('reusing known manifest\n')
2061 self.ui.debug('reusing known manifest\n')
2077 mn = ctx.manifestnode()
2062 mn = ctx.manifestnode()
2078 files = ctx.files()
2063 files = ctx.files()
2079 elif ctx.files():
2064 elif ctx.files():
2080 m1ctx = p1.manifestctx()
2065 m1ctx = p1.manifestctx()
2081 m2ctx = p2.manifestctx()
2066 m2ctx = p2.manifestctx()
2082 mctx = m1ctx.copy()
2067 mctx = m1ctx.copy()
2083
2068
2084 m = mctx.read()
2069 m = mctx.read()
2085 m1 = m1ctx.read()
2070 m1 = m1ctx.read()
2086 m2 = m2ctx.read()
2071 m2 = m2ctx.read()
2087
2072
2088 # check in files
2073 # check in files
2089 added = []
2074 added = []
2090 changed = []
2075 changed = []
2091 removed = list(ctx.removed())
2076 removed = list(ctx.removed())
2092 linkrev = len(self)
2077 linkrev = len(self)
2093 self.ui.note(_("committing files:\n"))
2078 self.ui.note(_("committing files:\n"))
2094 for f in sorted(ctx.modified() + ctx.added()):
2079 for f in sorted(ctx.modified() + ctx.added()):
2095 self.ui.note(f + "\n")
2080 self.ui.note(f + "\n")
2096 try:
2081 try:
2097 fctx = ctx[f]
2082 fctx = ctx[f]
2098 if fctx is None:
2083 if fctx is None:
2099 removed.append(f)
2084 removed.append(f)
2100 else:
2085 else:
2101 added.append(f)
2086 added.append(f)
2102 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2087 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2103 trp, changed)
2088 trp, changed)
2104 m.setflag(f, fctx.flags())
2089 m.setflag(f, fctx.flags())
2105 except OSError as inst:
2090 except OSError as inst:
2106 self.ui.warn(_("trouble committing %s!\n") % f)
2091 self.ui.warn(_("trouble committing %s!\n") % f)
2107 raise
2092 raise
2108 except IOError as inst:
2093 except IOError as inst:
2109 errcode = getattr(inst, 'errno', errno.ENOENT)
2094 errcode = getattr(inst, 'errno', errno.ENOENT)
2110 if error or errcode and errcode != errno.ENOENT:
2095 if error or errcode and errcode != errno.ENOENT:
2111 self.ui.warn(_("trouble committing %s!\n") % f)
2096 self.ui.warn(_("trouble committing %s!\n") % f)
2112 raise
2097 raise
2113
2098
2114 # update manifest
2099 # update manifest
2115 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2100 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2116 drop = [f for f in removed if f in m]
2101 drop = [f for f in removed if f in m]
2117 for f in drop:
2102 for f in drop:
2118 del m[f]
2103 del m[f]
2119 files = changed + removed
2104 files = changed + removed
2120 md = None
2105 md = None
2121 if not files:
2106 if not files:
2122 # if no "files" actually changed in terms of the changelog,
2107 # if no "files" actually changed in terms of the changelog,
2123 # try hard to detect unmodified manifest entry so that the
2108 # try hard to detect unmodified manifest entry so that the
2124 # exact same commit can be reproduced later on convert.
2109 # exact same commit can be reproduced later on convert.
2125 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2110 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2126 if not files and md:
2111 if not files and md:
2127 self.ui.debug('not reusing manifest (no file change in '
2112 self.ui.debug('not reusing manifest (no file change in '
2128 'changelog, but manifest differs)\n')
2113 'changelog, but manifest differs)\n')
2129 if files or md:
2114 if files or md:
2130 self.ui.note(_("committing manifest\n"))
2115 self.ui.note(_("committing manifest\n"))
2131 mn = mctx.write(trp, linkrev,
2116 mn = mctx.write(trp, linkrev,
2132 p1.manifestnode(), p2.manifestnode(),
2117 p1.manifestnode(), p2.manifestnode(),
2133 added, drop)
2118 added, drop)
2134 else:
2119 else:
2135 self.ui.debug('reusing manifest from p1 (listed files '
2120 self.ui.debug('reusing manifest from p1 (listed files '
2136 'actually unchanged)\n')
2121 'actually unchanged)\n')
2137 mn = p1.manifestnode()
2122 mn = p1.manifestnode()
2138 else:
2123 else:
2139 self.ui.debug('reusing manifest from p1 (no file change)\n')
2124 self.ui.debug('reusing manifest from p1 (no file change)\n')
2140 mn = p1.manifestnode()
2125 mn = p1.manifestnode()
2141 files = []
2126 files = []
2142
2127
2143 # update changelog
2128 # update changelog
2144 self.ui.note(_("committing changelog\n"))
2129 self.ui.note(_("committing changelog\n"))
2145 self.changelog.delayupdate(tr)
2130 self.changelog.delayupdate(tr)
2146 n = self.changelog.add(mn, files, ctx.description(),
2131 n = self.changelog.add(mn, files, ctx.description(),
2147 trp, p1.node(), p2.node(),
2132 trp, p1.node(), p2.node(),
2148 user, ctx.date(), ctx.extra().copy())
2133 user, ctx.date(), ctx.extra().copy())
2149 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2134 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2150 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2135 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2151 parent2=xp2)
2136 parent2=xp2)
2152 # set the new commit in its proper phase
2137 # set the new commit in its proper phase
2153 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2138 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2154 if targetphase:
2139 if targetphase:
2155 # retracting the boundary does not alter parent changesets.
2140 # retracting the boundary does not alter parent changesets.
2156 # if a parent has a higher phase, the resulting phase will
2141 # if a parent has a higher phase, the resulting phase will
2157 # be compliant anyway
2142 # be compliant anyway
2158 #
2143 #
2159 # if minimal phase was 0 we don't need to retract anything
2144 # if minimal phase was 0 we don't need to retract anything
2160 phases.registernew(self, tr, targetphase, [n])
2145 phases.registernew(self, tr, targetphase, [n])
2161 tr.close()
2146 tr.close()
2162 return n
2147 return n
2163 finally:
2148 finally:
2164 if tr:
2149 if tr:
2165 tr.release()
2150 tr.release()
2166 lock.release()
2151 lock.release()
2167
2152
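Callers that build revisions in memory typically reach commitctx() through context.memctx. A hedged sketch under that assumption; the file name, message and content are invented:

from mercurial import context

def getfilectx(repo, memctx, path):
    return context.memfilectx(repo, memctx, path, b'contents\n')

mctx = context.memctx(repo, (repo[b'.'].node(), None),
                      b'in-memory commit', [b'newfile.txt'], getfilectx,
                      user=b'alice')
node = repo.commitctx(mctx)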
2168 @unfilteredmethod
2153 @unfilteredmethod
2169 def destroying(self):
2154 def destroying(self):
2170 '''Inform the repository that nodes are about to be destroyed.
2155 '''Inform the repository that nodes are about to be destroyed.
2171 Intended for use by strip and rollback, so there's a common
2156 Intended for use by strip and rollback, so there's a common
2172 place for anything that has to be done before destroying history.
2157 place for anything that has to be done before destroying history.
2173
2158
2174 This is mostly useful for saving state that is in memory and waiting
2159 This is mostly useful for saving state that is in memory and waiting
2175 to be flushed when the current lock is released. Because a call to
2160 to be flushed when the current lock is released. Because a call to
2176 destroyed is imminent, the repo will be invalidated, causing those
2161 destroyed is imminent, the repo will be invalidated, causing those
2177 changes to either stay in memory (waiting for the next unlock) or vanish
2162 changes to either stay in memory (waiting for the next unlock) or vanish
2178 completely.
2163 completely.
2179 '''
2164 '''
2180 # When using the same lock to commit and strip, the phasecache is left
2165 # When using the same lock to commit and strip, the phasecache is left
2181 # dirty after committing. Then when we strip, the repo is invalidated,
2166 # dirty after committing. Then when we strip, the repo is invalidated,
2182 # causing those changes to disappear.
2167 # causing those changes to disappear.
2183 if '_phasecache' in vars(self):
2168 if '_phasecache' in vars(self):
2184 self._phasecache.write()
2169 self._phasecache.write()
2185
2170
2186 @unfilteredmethod
2171 @unfilteredmethod
2187 def destroyed(self):
2172 def destroyed(self):
2188 '''Inform the repository that nodes have been destroyed.
2173 '''Inform the repository that nodes have been destroyed.
2189 Intended for use by strip and rollback, so there's a common
2174 Intended for use by strip and rollback, so there's a common
2190 place for anything that has to be done after destroying history.
2175 place for anything that has to be done after destroying history.
2191 '''
2176 '''
2192 # When one tries to:
2177 # When one tries to:
2193 # 1) destroy nodes thus calling this method (e.g. strip)
2178 # 1) destroy nodes thus calling this method (e.g. strip)
2194 # 2) use phasecache somewhere (e.g. commit)
2179 # 2) use phasecache somewhere (e.g. commit)
2195 #
2180 #
2196 # then 2) will fail because the phasecache contains nodes that were
2181 # then 2) will fail because the phasecache contains nodes that were
2197 # removed. We can either remove phasecache from the filecache,
2182 # removed. We can either remove phasecache from the filecache,
2198 # causing it to reload next time it is accessed, or simply filter
2183 # causing it to reload next time it is accessed, or simply filter
2199 # the removed nodes now and write the updated cache.
2184 # the removed nodes now and write the updated cache.
2200 self._phasecache.filterunknown(self)
2185 self._phasecache.filterunknown(self)
2201 self._phasecache.write()
2186 self._phasecache.write()
2202
2187
2203 # refresh all repository caches
2188 # refresh all repository caches
2204 self.updatecaches()
2189 self.updatecaches()
2205
2190
2206 # Ensure the persistent tag cache is updated. Doing it now
2191 # Ensure the persistent tag cache is updated. Doing it now
2207 # means that the tag cache only has to worry about destroyed
2192 # means that the tag cache only has to worry about destroyed
2208 # heads immediately after a strip/rollback. That in turn
2193 # heads immediately after a strip/rollback. That in turn
2209 # guarantees that "cachetip == currenttip" (comparing both rev
2194 # guarantees that "cachetip == currenttip" (comparing both rev
2210 # and node) always means no nodes have been added or destroyed.
2195 # and node) always means no nodes have been added or destroyed.
2211
2196
2212 # XXX this is suboptimal when qrefresh'ing: we strip the current
2197 # XXX this is suboptimal when qrefresh'ing: we strip the current
2213 # head, refresh the tag cache, then immediately add a new head.
2198 # head, refresh the tag cache, then immediately add a new head.
2214 # But I think doing it this way is necessary for the "instant
2199 # But I think doing it this way is necessary for the "instant
2215 # tag cache retrieval" case to work.
2200 # tag cache retrieval" case to work.
2216 self.invalidate()
2201 self.invalidate()
2217
2202
2218 def status(self, node1='.', node2=None, match=None,
2203 def status(self, node1='.', node2=None, match=None,
2219 ignored=False, clean=False, unknown=False,
2204 ignored=False, clean=False, unknown=False,
2220 listsubrepos=False):
2205 listsubrepos=False):
2221 '''a convenience method that calls node1.status(node2)'''
2206 '''a convenience method that calls node1.status(node2)'''
2222 return self[node1].status(node2, match, ignored, clean, unknown,
2207 return self[node1].status(node2, match, ignored, clean, unknown,
2223 listsubrepos)
2208 listsubrepos)
2224
2209
2225 def addpostdsstatus(self, ps):
2210 def addpostdsstatus(self, ps):
2226 """Add a callback to run within the wlock, at the point at which status
2211 """Add a callback to run within the wlock, at the point at which status
2227 fixups happen.
2212 fixups happen.
2228
2213
2229 On status completion, callback(wctx, status) will be called with the
2214 On status completion, callback(wctx, status) will be called with the
2230 wlock held, unless the dirstate has changed from underneath or the wlock
2215 wlock held, unless the dirstate has changed from underneath or the wlock
2231 couldn't be grabbed.
2216 couldn't be grabbed.
2232
2217
2233 Callbacks should not capture and use a cached copy of the dirstate --
2218 Callbacks should not capture and use a cached copy of the dirstate --
2234 it might change in the meanwhile. Instead, they should access the
2219 it might change in the meanwhile. Instead, they should access the
2235 dirstate via wctx.repo().dirstate.
2220 dirstate via wctx.repo().dirstate.
2236
2221
2237 This list is emptied out after each status run -- extensions should
2222 This list is emptied out after each status run -- extensions should
2238 make sure they add to this list each time dirstate.status is called.
2223 make sure they add to this list each time dirstate.status is called.
2239 Extensions should also make sure they don't call this for statuses
2224 Extensions should also make sure they don't call this for statuses
2240 that don't involve the dirstate.
2225 that don't involve the dirstate.
2241 """
2226 """
2242
2227
2243 # The list is located here for uniqueness reasons -- it is actually
2228 # The list is located here for uniqueness reasons -- it is actually
2244 # managed by the workingctx, but that isn't unique per-repo.
2229 # managed by the workingctx, but that isn't unique per-repo.
2245 self._postdsstatus.append(ps)
2230 self._postdsstatus.append(ps)
2246
2231
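A hedged sketch of a callback honoring the contract above (the debug message is invented); note it reaches the dirstate through wctx.repo() rather than a captured copy:

def fixup(wctx, status):
    wctx.repo().ui.debug(b'%d files modified\n' % len(status.modified))

repo.addpostdsstatus(fixup)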
2247 def postdsstatus(self):
2232 def postdsstatus(self):
2248 """Used by workingctx to get the list of post-dirstate-status hooks."""
2233 """Used by workingctx to get the list of post-dirstate-status hooks."""
2249 return self._postdsstatus
2234 return self._postdsstatus
2250
2235
2251 def clearpostdsstatus(self):
2236 def clearpostdsstatus(self):
2252 """Used by workingctx to clear post-dirstate-status hooks."""
2237 """Used by workingctx to clear post-dirstate-status hooks."""
2253 del self._postdsstatus[:]
2238 del self._postdsstatus[:]
2254
2239
2255 def heads(self, start=None):
2240 def heads(self, start=None):
2256 if start is None:
2241 if start is None:
2257 cl = self.changelog
2242 cl = self.changelog
2258 headrevs = reversed(cl.headrevs())
2243 headrevs = reversed(cl.headrevs())
2259 return [cl.node(rev) for rev in headrevs]
2244 return [cl.node(rev) for rev in headrevs]
2260
2245
2261 heads = self.changelog.heads(start)
2246 heads = self.changelog.heads(start)
2262 # sort the output in rev descending order
2247 # sort the output in rev descending order
2263 return sorted(heads, key=self.changelog.rev, reverse=True)
2248 return sorted(heads, key=self.changelog.rev, reverse=True)
2264
2249
2265 def branchheads(self, branch=None, start=None, closed=False):
2250 def branchheads(self, branch=None, start=None, closed=False):
2266 '''return a (possibly filtered) list of heads for the given branch
2251 '''return a (possibly filtered) list of heads for the given branch
2267
2252
2268 Heads are returned in topological order, from newest to oldest.
2253 Heads are returned in topological order, from newest to oldest.
2269 If branch is None, use the dirstate branch.
2254 If branch is None, use the dirstate branch.
2270 If start is not None, return only heads reachable from start.
2255 If start is not None, return only heads reachable from start.
2271 If closed is True, return heads that are marked as closed as well.
2256 If closed is True, return heads that are marked as closed as well.
2272 '''
2257 '''
2273 if branch is None:
2258 if branch is None:
2274 branch = self[None].branch()
2259 branch = self[None].branch()
2275 branches = self.branchmap()
2260 branches = self.branchmap()
2276 if branch not in branches:
2261 if branch not in branches:
2277 return []
2262 return []
2278 # the cache returns heads ordered lowest to highest
2263 # the cache returns heads ordered lowest to highest
2279 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2264 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2280 if start is not None:
2265 if start is not None:
2281 # filter out the heads that cannot be reached from startrev
2266 # filter out the heads that cannot be reached from startrev
2282 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2267 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2283 bheads = [h for h in bheads if h in fbheads]
2268 bheads = [h for h in bheads if h in fbheads]
2284 return bheads
2269 return bheads
2285
2270
2286 def branches(self, nodes):
2271 def branches(self, nodes):
2287 if not nodes:
2272 if not nodes:
2288 nodes = [self.changelog.tip()]
2273 nodes = [self.changelog.tip()]
2289 b = []
2274 b = []
2290 for n in nodes:
2275 for n in nodes:
2291 t = n
2276 t = n
2292 while True:
2277 while True:
2293 p = self.changelog.parents(n)
2278 p = self.changelog.parents(n)
2294 if p[1] != nullid or p[0] == nullid:
2279 if p[1] != nullid or p[0] == nullid:
2295 b.append((t, n, p[0], p[1]))
2280 b.append((t, n, p[0], p[1]))
2296 break
2281 break
2297 n = p[0]
2282 n = p[0]
2298 return b
2283 return b
2299
2284
2300 def between(self, pairs):
2285 def between(self, pairs):
2301 r = []
2286 r = []
2302
2287
2303 for top, bottom in pairs:
2288 for top, bottom in pairs:
2304 n, l, i = top, [], 0
2289 n, l, i = top, [], 0
2305 f = 1
2290 f = 1
2306
2291
2307 while n != bottom and n != nullid:
2292 while n != bottom and n != nullid:
2308 p = self.changelog.parents(n)[0]
2293 p = self.changelog.parents(n)[0]
2309 if i == f:
2294 if i == f:
2310 l.append(n)
2295 l.append(n)
2311 f = f * 2
2296 f = f * 2
2312 n = p
2297 n = p
2313 i += 1
2298 i += 1
2314
2299
2315 r.append(l)
2300 r.append(l)
2316
2301
2317 return r
2302 return r
2318
2303
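The loop above records nodes at exponentially growing distances along first parents (appending when i == f, then doubling f), which is the sparse sampling the legacy 'between' wire command expects. A standalone sketch of just the position logic:

def sample_positions(depth):
    i, f, out = 0, 1, []
    while i < depth:
        if i == f:
            out.append(i)
            f *= 2
        i += 1
    return out

sample_positions(10)  # -> [1, 2, 4, 8]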
2319 def checkpush(self, pushop):
2304 def checkpush(self, pushop):
2320 """Extensions can override this function if additional checks have
2305 """Extensions can override this function if additional checks have
2321 to be performed before pushing, or call it if they override push
2306 to be performed before pushing, or call it if they override push
2322 command.
2307 command.
2323 """
2308 """
2324
2309
2325 @unfilteredpropertycache
2310 @unfilteredpropertycache
2326 def prepushoutgoinghooks(self):
2311 def prepushoutgoinghooks(self):
2327 """Return util.hooks consists of a pushop with repo, remote, outgoing
2312 """Return util.hooks consists of a pushop with repo, remote, outgoing
2328 methods, which are called before pushing changesets.
2313 methods, which are called before pushing changesets.
2329 """
2314 """
2330 return util.hooks()
2315 return util.hooks()
2331
2316
2332 def pushkey(self, namespace, key, old, new):
2317 def pushkey(self, namespace, key, old, new):
2333 try:
2318 try:
2334 tr = self.currenttransaction()
2319 tr = self.currenttransaction()
2335 hookargs = {}
2320 hookargs = {}
2336 if tr is not None:
2321 if tr is not None:
2337 hookargs.update(tr.hookargs)
2322 hookargs.update(tr.hookargs)
2338 hookargs = pycompat.strkwargs(hookargs)
2323 hookargs = pycompat.strkwargs(hookargs)
2339 hookargs[r'namespace'] = namespace
2324 hookargs[r'namespace'] = namespace
2340 hookargs[r'key'] = key
2325 hookargs[r'key'] = key
2341 hookargs[r'old'] = old
2326 hookargs[r'old'] = old
2342 hookargs[r'new'] = new
2327 hookargs[r'new'] = new
2343 self.hook('prepushkey', throw=True, **hookargs)
2328 self.hook('prepushkey', throw=True, **hookargs)
2344 except error.HookAbort as exc:
2329 except error.HookAbort as exc:
2345 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2330 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2346 if exc.hint:
2331 if exc.hint:
2347 self.ui.write_err(_("(%s)\n") % exc.hint)
2332 self.ui.write_err(_("(%s)\n") % exc.hint)
2348 return False
2333 return False
2349 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2334 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2350 ret = pushkey.push(self, namespace, key, old, new)
2335 ret = pushkey.push(self, namespace, key, old, new)
2351 def runhook():
2336 def runhook():
2352 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2337 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2353 ret=ret)
2338 ret=ret)
2354 self._afterlock(runhook)
2339 self._afterlock(runhook)
2355 return ret
2340 return ret
2356
2341
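A hedged usage sketch (the bookmark name is invented): pushkey applies a namespaced key update, returning False when a prepushkey hook aborts or the old value does not match:

from mercurial.node import hex

node = repo[b'tip'].node()
ok = repo.pushkey(b'bookmarks', b'stable', b'', hex(node))  # b'' old value: create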
2357 def listkeys(self, namespace):
2342 def listkeys(self, namespace):
2358 self.hook('prelistkeys', throw=True, namespace=namespace)
2343 self.hook('prelistkeys', throw=True, namespace=namespace)
2359 self.ui.debug('listing keys for "%s"\n' % namespace)
2344 self.ui.debug('listing keys for "%s"\n' % namespace)
2360 values = pushkey.list(self, namespace)
2345 values = pushkey.list(self, namespace)
2361 self.hook('listkeys', namespace=namespace, values=values)
2346 self.hook('listkeys', namespace=namespace, values=values)
2362 return values
2347 return values
2363
2348
2364 def debugwireargs(self, one, two, three=None, four=None, five=None):
2349 def debugwireargs(self, one, two, three=None, four=None, five=None):
2365 '''used to test argument passing over the wire'''
2350 '''used to test argument passing over the wire'''
2366 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2351 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2367 pycompat.bytestr(four),
2352 pycompat.bytestr(four),
2368 pycompat.bytestr(five))
2353 pycompat.bytestr(five))
2369
2354
2370 def savecommitmessage(self, text):
2355 def savecommitmessage(self, text):
2371 fp = self.vfs('last-message.txt', 'wb')
2356 fp = self.vfs('last-message.txt', 'wb')
2372 try:
2357 try:
2373 fp.write(text)
2358 fp.write(text)
2374 finally:
2359 finally:
2375 fp.close()
2360 fp.close()
2376 return self.pathto(fp.name[len(self.root) + 1:])
2361 return self.pathto(fp.name[len(self.root) + 1:])
2377
2362
2378 # used to avoid circular references so destructors work
2363 # used to avoid circular references so destructors work
2379 def aftertrans(files):
2364 def aftertrans(files):
2380 renamefiles = [tuple(t) for t in files]
2365 renamefiles = [tuple(t) for t in files]
2381 def a():
2366 def a():
2382 for vfs, src, dest in renamefiles:
2367 for vfs, src, dest in renamefiles:
2383 # if src and dest refer to a same file, vfs.rename is a no-op,
2368 # if src and dest refer to a same file, vfs.rename is a no-op,
2384 # leaving both src and dest on disk. delete dest to make sure
2369 # leaving both src and dest on disk. delete dest to make sure
2385 # the rename couldn't be such a no-op.
2370 # the rename couldn't be such a no-op.
2386 vfs.tryunlink(dest)
2371 vfs.tryunlink(dest)
2387 try:
2372 try:
2388 vfs.rename(src, dest)
2373 vfs.rename(src, dest)
2389 except OSError: # journal file does not yet exist
2374 except OSError: # journal file does not yet exist
2390 pass
2375 pass
2391 return a
2376 return a
2392
2377
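A hedged usage sketch of aftertrans, following the journal/undo naming convention used by undoname below:

from mercurial import localrepo

renames = [(repo.svfs, 'journal', 'undo')]
finalizer = localrepo.aftertrans(renames)
finalizer()  # tryunlink('undo') first, then rename('journal', 'undo')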
2393 def undoname(fn):
2378 def undoname(fn):
2394 base, name = os.path.split(fn)
2379 base, name = os.path.split(fn)
2395 assert name.startswith('journal')
2380 assert name.startswith('journal')
2396 return os.path.join(base, name.replace('journal', 'undo', 1))
2381 return os.path.join(base, name.replace('journal', 'undo', 1))
2397
2382
2398 def instance(ui, path, create, intents=None):
2399 return localrepository(ui, util.urllocalpath(path), create,
2400 intents=intents)
2383 def instance(ui, path, create, intents=None):
2384 if create:
2385 vfs = vfsmod.vfs(path, expandpath=True, realpath=True)
2386
2387 if vfs.exists('.hg'):
2388 raise error.RepoError(_('repository %s already exists') % path)
2389
2390 createrepository(ui, vfs)
2391
2392 return localrepository(ui, util.urllocalpath(path), intents=intents)
2392 return localrepository(ui, util.urllocalpath(path), intents=intents)
2401
2393
2402 def islocal(path):
2394 def islocal(path):
2403 return True
2395 return True
2404
2396
2405 def newreporequirements(ui):
2397 def newreporequirements(ui):
2406 """Determine the set of requirements for a new local repository.
2398 """Determine the set of requirements for a new local repository.
2407
2399
2408 Extensions can wrap this function to specify custom requirements for
2400 Extensions can wrap this function to specify custom requirements for
2409 new repositories.
2401 new repositories.
2410 """
2402 """
2411 requirements = {'revlogv1'}
2403 requirements = {'revlogv1'}
2412 if ui.configbool('format', 'usestore'):
2404 if ui.configbool('format', 'usestore'):
2413 requirements.add('store')
2405 requirements.add('store')
2414 if ui.configbool('format', 'usefncache'):
2406 if ui.configbool('format', 'usefncache'):
2415 requirements.add('fncache')
2407 requirements.add('fncache')
2416 if ui.configbool('format', 'dotencode'):
2408 if ui.configbool('format', 'dotencode'):
2417 requirements.add('dotencode')
2409 requirements.add('dotencode')
2418
2410
2419 compengine = ui.config('experimental', 'format.compression')
2411 compengine = ui.config('experimental', 'format.compression')
2420 if compengine not in util.compengines:
2412 if compengine not in util.compengines:
2421 raise error.Abort(_('compression engine %s defined by '
2413 raise error.Abort(_('compression engine %s defined by '
2422 'experimental.format.compression not available') %
2414 'experimental.format.compression not available') %
2423 compengine,
2415 compengine,
2424 hint=_('run "hg debuginstall" to list available '
2416 hint=_('run "hg debuginstall" to list available '
2425 'compression engines'))
2417 'compression engines'))
2426
2418
2427 # zlib is the historical default and doesn't need an explicit requirement.
2419 # zlib is the historical default and doesn't need an explicit requirement.
2428 if compengine != 'zlib':
2420 if compengine != 'zlib':
2429 requirements.add('exp-compression-%s' % compengine)
2421 requirements.add('exp-compression-%s' % compengine)
2430
2422
2431 if scmutil.gdinitconfig(ui):
2423 if scmutil.gdinitconfig(ui):
2432 requirements.add('generaldelta')
2424 requirements.add('generaldelta')
2433 if ui.configbool('experimental', 'treemanifest'):
2425 if ui.configbool('experimental', 'treemanifest'):
2434 requirements.add('treemanifest')
2426 requirements.add('treemanifest')
2435 # experimental config: format.sparse-revlog
2427 # experimental config: format.sparse-revlog
2436 if ui.configbool('format', 'sparse-revlog'):
2428 if ui.configbool('format', 'sparse-revlog'):
2437 requirements.add(SPARSEREVLOG_REQUIREMENT)
2429 requirements.add(SPARSEREVLOG_REQUIREMENT)
2438
2430
2439 revlogv2 = ui.config('experimental', 'revlogv2')
2431 revlogv2 = ui.config('experimental', 'revlogv2')
2440 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2432 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2441 requirements.remove('revlogv1')
2433 requirements.remove('revlogv1')
2442 # generaldelta is implied by revlogv2.
2434 # generaldelta is implied by revlogv2.
2443 requirements.discard('generaldelta')
2435 requirements.discard('generaldelta')
2444 requirements.add(REVLOGV2_REQUIREMENT)
2436 requirements.add(REVLOGV2_REQUIREMENT)
2445 # experimental config: format.internal-phase
2437 # experimental config: format.internal-phase
2446 if ui.configbool('format', 'internal-phase'):
2438 if ui.configbool('format', 'internal-phase'):
2447 requirements.add('internal-phase')
2439 requirements.add('internal-phase')
2448
2440
2449 return requirements
2441 return requirements
2442
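The docstring notes that extensions can wrap this function. A hedged sketch using extensions.wrapfunction; 'exp-myfeature' is an invented requirement name:

from mercurial import extensions, localrepo

def _wrapped(orig, ui):
    requirements = orig(ui)
    requirements.add('exp-myfeature')
    return requirements

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements', _wrapped)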
2443 def createrepository(ui, wdirvfs):
2444 """Create a new repository in a vfs.
2445
2446 ``wdirvfs`` is a vfs instance pointing at the working directory.
2447 The requirements for the new repository are derived from ``newreporequirements()``.
2448 """
2449 requirements = newreporequirements(ui)
2450
2451 if not wdirvfs.exists():
2452 wdirvfs.makedirs()
2453
2454 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
2455 hgvfs.makedir(notindexed=True)
2456
2457 if b'store' in requirements:
2458 hgvfs.mkdir(b'store')
2459
2460 # We create an invalid changelog outside the store so very old
2461 # Mercurial versions (which didn't know about the requirements
2462 # file) encounter an error on reading the changelog. This
2463 # effectively locks out old clients and prevents them from
2464 # mucking with a repo in an unknown format.
2465 #
2466 # The revlog header has version 2, which won't be recognized by
2467 # such old clients.
2468 hgvfs.append(b'00changelog.i',
2469 b'\0\0\0\2 dummy changelog to prevent using the old repo '
2470 b'layout')
2471
2472 scmutil.writerequires(hgvfs, requirements)
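Putting the steps above together, a hedged illustration of the on-disk result for a default configuration (the exact requirement list varies with config):

.hg/
  00changelog.i   # dummy entry with a version-2 header; locks out clients
                  # that predate the requires file
  requires        # e.g. dotencode, fncache, generaldelta, revlogv1, store
  store/          # present because 'store' is in the default requirements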
@@ -1,47 +1,47 b''
1 #!/usr/bin/env python
1 #!/usr/bin/env python
2 from __future__ import absolute_import, print_function
2 from __future__ import absolute_import, print_function
3
3
4 import sys
4 import sys
5
5
6 from mercurial import (
6 from mercurial import (
7 commands,
7 commands,
8 localrepo,
8 localrepo,
9 ui as uimod,
9 ui as uimod,
10 )
10 )
11
11
12 print_ = print
12 print_ = print
13 def print(*args, **kwargs):
13 def print(*args, **kwargs):
14 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
14 """print() wrapper that flushes stdout buffers to avoid py3 buffer issues
15
15
16 We could also just write directly to sys.stdout.buffer the way the
16 We could also just write directly to sys.stdout.buffer the way the
17 ui object will, but this was easier for porting the test.
17 ui object will, but this was easier for porting the test.
18 """
18 """
19 print_(*args, **kwargs)
19 print_(*args, **kwargs)
20 sys.stdout.flush()
20 sys.stdout.flush()
21
21
22 u = uimod.ui.load()
22 u = uimod.ui.load()
23
23
24 print('% creating repo')
24 print('% creating repo')
25 repo = localrepo.localrepository(u, b'.', create=True)
25 repo = localrepo.instance(u, b'.', create=True)
26
26
27 f = open('test.py', 'w')
27 f = open('test.py', 'w')
28 try:
28 try:
29 f.write('foo\n')
29 f.write('foo\n')
30 finally:
30 finally:
31 f.close()
31 f.close()
32
32
33 print('% add and commit')
33 print('% add and commit')
34 commands.add(u, repo, b'test.py')
34 commands.add(u, repo, b'test.py')
35 commands.commit(u, repo, message=b'*')
35 commands.commit(u, repo, message=b'*')
36 commands.status(u, repo, clean=True)
36 commands.status(u, repo, clean=True)
37
37
38
38
39 print('% change')
39 print('% change')
40 f = open('test.py', 'w')
40 f = open('test.py', 'w')
41 try:
41 try:
42 f.write('bar\n')
42 f.write('bar\n')
43 finally:
43 finally:
44 f.close()
44 f.close()
45
45
46 # this would return clean instead of changed before the fix
46 # this would return clean instead of changed before the fix
47 commands.status(u, repo, clean=True, modified=True)
47 commands.status(u, repo, clean=True, modified=True)