pathutil: tease out a new library to break an import cycle from canonpath use

Augie Fackler
r20033:f9628707 default
@@ -0,0 +1,144 @@ mercurial/pathutil.py (new file)
import os, errno, stat

import util
from i18n import _

class pathauditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - ends with a directory separator
    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        self.callback = callback
        if os.path.lexists(root) and not util.checkcase(root):
            self.normcase = util.normcase
        else:
            self.normcase = lambda x: x

    def __call__(self, path):
        '''Check the relative path.
        path may contain a pattern (e.g. foodir/**.txt)'''

        path = util.localpath(path)
        normpath = self.normcase(path)
        if normpath in self.audited:
            return
        # AIX ignores "/" at end of path, others raise EISDIR.
        if util.endswithsep(path):
            raise util.Abort(_("path ends in directory separator: %s") % path)
        parts = util.splitpath(path)
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise util.Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise util.Abort(_("path '%s' is inside nested repo %r")
                                     % (path, base))

        normparts = util.splitpath(normpath)
        assert len(parts) == len(normparts)

        parts.pop()
        normparts.pop()
        prefixes = []
        while parts:
            prefix = os.sep.join(parts)
            normprefix = os.sep.join(normparts)
            if normprefix in self.auditeddir:
                break
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise util.Abort(
                        _('path %r traverses symbolic link %r')
                        % (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    if not self.callback or not self.callback(curpath):
                        raise util.Abort(_("path '%s' is inside nested "
                                           "repo %r")
                                         % (path, prefix))
            prefixes.append(normprefix)
            parts.pop()
            normparts.pop()

        self.audited.add(normpath)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)

    def check(self, path):
        try:
            self(path)
            return True
        except (OSError, util.Abort):
            return False

def canonpath(root, cwd, myname, auditor=None):
    '''return the canonical path of myname, given cwd and root'''
    if util.endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = pathauditor(root)
    if name != rootsep and name.startswith(rootsep):
        name = name[len(rootsep):]
        auditor(name)
        return util.pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). The list
        # `rel' holds the reversed list of components making up the relative
        # file name we want.
        rel = []
        while True:
            try:
                s = util.samefile(name, root)
            except OSError:
                s = False
            if s:
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return util.pconvert(name)
            dirname, basename = util.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise util.Abort(_("%s not under root '%s'") % (myname, root))
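
For orientation, a short usage sketch of the new module (not part of the
commit). It assumes this vintage of Mercurial (Python 2) is importable; the
repository path is hypothetical:

# Minimal sketch: auditing paths and canonicalizing a name against a root.
from mercurial import pathutil

repo_root = '/path/to/repo'            # hypothetical working copy root
auditor = pathutil.pathauditor(repo_root)

auditor('docs/readme.txt')             # passes: returns silently
print auditor.check('../etc/passwd')   # False: contains ".."
print auditor.check('.hg/hgrc')        # False: under top-level .hg

# canonpath resolves a cwd-relative name to a root-relative, audited path.
print pathutil.canonpath(repo_root, 'docs', 'readme.txt')
# -> 'docs/readme.txt'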
@@ -1,732 +1,732 @@ hgext/keyword.py
# keyword.py - $Keyword$ expansion for Mercurial
#
# Copyright 2007-2012 Christian Ebert <blacktrash@gmx.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
#
# $Id$
#
# Keyword expansion hack against the grain of a Distributed SCM
#
# There are many good reasons why this is not needed in a distributed
# SCM, still it may be useful in very small projects based on single
# files (like LaTeX packages), that are mostly addressed to an
# audience not running a version control system.
#
# For in-depth discussion refer to
# <http://mercurial.selenic.com/wiki/KeywordPlan>.
#
# Keyword expansion is based on Mercurial's changeset template mappings.
#
# Binary files are not touched.
#
# Files to act upon/ignore are specified in the [keyword] section.
# Customized keyword template mappings in the [keywordmaps] section.
#
# Run "hg help keyword" and "hg kwdemo" to get info on configuration.

'''expand keywords in tracked files

This extension expands RCS/CVS-like or self-customized $Keywords$ in
tracked text files selected by your configuration.

Keywords are only expanded in local repositories and not stored in the
change history. The mechanism can be regarded as a convenience for the
current user or for archive distribution.

Keywords expand to the changeset data pertaining to the latest change
relative to the working directory parent of each file.

Configuration is done in the [keyword], [keywordset] and [keywordmaps]
sections of hgrc files.

Example::

    [keyword]
    # expand keywords in every python file except those matching "x*"
    **.py =
    x* = ignore

    [keywordset]
    # prefer svn- over cvs-like default keywordmaps
    svn = True

.. note::

   The more specific you are in your filename patterns the less you
   lose speed in huge repositories.

For [keywordmaps] template mapping and expansion demonstration and
control run :hg:`kwdemo`. See :hg:`help templates` for a list of
available templates and filters.

Three additional date template filters are provided:

:``utcdate``:    "2006/09/18 15:13:13"
:``svnutcdate``: "2006-09-18 15:13:13Z"
:``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"

The default template mappings (view with :hg:`kwdemo -d`) can be
replaced with customized keywords and templates. Again, run
:hg:`kwdemo` to control the results of your configuration changes.

Before changing/disabling active keywords, you must run :hg:`kwshrink`
to avoid storing expanded keywords in the change history.

To force expansion after enabling it, or a configuration change, run
:hg:`kwexpand`.

Expansions spanning more than one line and incremental expansions,
like CVS' $Log$, are not supported. A keyword template map "Log =
{desc}" expands to the first line of the changeset description.
'''

from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
from mercurial import localrepo, match, patch, templatefilters, templater, util
-from mercurial import scmutil
+from mercurial import scmutil, pathutil
from mercurial.hgweb import webcommands
from mercurial.i18n import _
import os, re, shutil, tempfile

commands.optionalrepo += ' kwdemo'
commands.inferrepo += ' kwexpand kwfiles kwshrink'

cmdtable = {}
command = cmdutil.command(cmdtable)
testedwith = 'internal'

# hg commands that do not act on keywords
nokwcommands = ('add addremove annotate bundle export grep incoming init log'
                ' outgoing push tip verify convert email glog')

# hg commands that trigger expansion only when writing to working dir,
# not when reading filelog, and unexpand when reading from working dir
restricted = 'merge kwexpand kwshrink record qrecord resolve transplant'

# names of extensions using dorecord
recordextensions = 'record'

colortable = {
    'kwfiles.enabled': 'green bold',
    'kwfiles.deleted': 'cyan bold underline',
    'kwfiles.enabledunknown': 'green',
    'kwfiles.ignored': 'bold',
    'kwfiles.ignoredunknown': 'none'
}

# date like in cvs' $Date
def utcdate(text):
    ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
    '''
    return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S')
# date like in svn's $Date
def svnisodate(text):
    ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
    +0200 (Tue, 18 Aug 2009)".
    '''
    return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
# date like in svn's $Id
def svnutcdate(text):
    ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
    11:00:13Z".
    '''
    return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ')

templatefilters.filters.update({'utcdate': utcdate,
                                'svnisodate': svnisodate,
                                'svnutcdate': svnutcdate})

# make keyword tools accessible
kwtools = {'templater': None, 'hgcmd': ''}

def _defaultkwmaps(ui):
    '''Returns default keywordmaps according to keywordset configuration.'''
    templates = {
        'Revision': '{node|short}',
        'Author': '{author|user}',
    }
    kwsets = ({
        'Date': '{date|utcdate}',
        'RCSfile': '{file|basename},v',
        'RCSFile': '{file|basename},v', # kept for backwards compatibility
                                        # with hg-keyword
        'Source': '{root}/{file},v',
        'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
        'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
    }, {
        'Date': '{date|svnisodate}',
        'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
        'LastChangedRevision': '{node|short}',
        'LastChangedBy': '{author|user}',
        'LastChangedDate': '{date|svnisodate}',
    })
    templates.update(kwsets[ui.configbool('keywordset', 'svn')])
    return templates

def _shrinktext(text, subfunc):
    '''Helper for keyword expansion removal in text.
    Depending on subfunc also returns number of substitutions.'''
    return subfunc(r'$\1$', text)

def _preselect(wstatus, changed):
    '''Retrieves modified and added files from a working directory state
    and returns the subset of each contained in given changed files
    retrieved from a change context.'''
    modified, added = wstatus[:2]
    modified = [f for f in modified if f in changed]
    added = [f for f in added if f in changed]
    return modified, added


class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        self.ui = ui
        self.repo = repo
        self.match = match.match(repo.root, '', [], inc, exc)
        self.restrict = kwtools['hgcmd'] in restricted.split()
        self.postcommit = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict((k, templater.parsestring(v, False))
                                  for k, v in kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(re.escape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        return re.compile(r'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)

    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.'''
        def kwsub(mobj):
            kw = mobj.group(1)
            ct = cmdutil.changeset_templater(self.ui, self.repo,
                                             False, None, '', False)
            ct.use_template(self.templates[kw])
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        if not self.restrict and self.match(path) and not util.binary(data):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion but are not symbolic links.'''
        return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.postcommit: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        if self.restrict or rekw:
            re_kw = self.rekw
        else:
            re_kw = self.rekwexp
        if expand:
            msg = _('overwriting %s expanding keywords\n')
        else:
            msg = _('overwriting %s shrinking keywords\n')
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if util.binary(data):
                continue
            if expand:
                if lookup:
                    ctx = self.linkctx(f, mf[f])
                data, found = self.substitute(data, f, ctx, re_kw.subn)
            elif self.restrict:
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                fp = self.repo.wopener(f, "wb", atomictemp=True)
                fp.write(data)
                fp.close()
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.postcommit:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not util.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        if self.restrict:
            return self.shrink(fname, data)
        return data

class kwfilelog(filelog.filelog):
    '''
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    '''
    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        self.kwt = kwt
        self.path = path

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        data = super(kwfilelog, self).read(node)
        if self.renamed(node):
            return data
        return self.kwt.expand(self.path, node, data)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, text)

def _status(ui, repo, wctx, kwt, *pats, **opts):
    '''Bails out if [keyword] configuration is not active.
    Returns status of working directory.'''
    if kwt:
        return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
                           unknown=opts.get('unknown') or opts.get('all'))
    if ui.configitems('keyword'):
        raise util.Abort(_('[keyword] patterns cannot match'))
    raise util.Abort(_('no [keyword] patterns configured'))

def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.'''
    wctx = repo[None]
    if len(wctx.parents()) > 1:
        raise util.Abort(_('outstanding uncommitted merge'))
    kwt = kwtools['templater']
    wlock = repo.wlock()
    try:
        status = _status(ui, repo, wctx, kwt, *pats, **opts)
        modified, added, removed, deleted, unknown, ignored, clean = status
        if modified or added or removed or deleted:
            raise util.Abort(_('outstanding uncommitted changes'))
        kwt.overwrite(wctx, clean, True, expand)
    finally:
        wlock.release()

@command('kwdemo',
         [('d', 'default', None, _('show default keyword template maps')),
          ('f', 'rcfile', '',
           _('read maps from rcfile'), _('FILE'))],
         _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'))
def demo(ui, repo, *args, **opts):
    '''print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    '''
    def demoitems(section, items):
        ui.write('[%s]\n' % section)
        for k, v in sorted(items):
            ui.write('%s = %s\n' % (k, v))

    fn = 'demo.txt'
    tmpdir = tempfile.mkdtemp('', 'kwdemo.')
    ui.note(_('creating temporary repository at %s\n') % tmpdir)
    repo = localrepo.localrepository(repo.baseui, tmpdir, True)
    ui.setconfig('keyword', fn, '')
    svn = ui.configbool('keywordset', 'svn')
    # explicitly set keywordset for demo output
    ui.setconfig('keywordset', 'svn', svn)

    uikwmaps = ui.configitems('keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_('\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_('\toverriding default svn keywordset\n'))
            else:
                ui.status(_('\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get('rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
            fp = repo.opener('hgrc', 'w')
            fp.writelines(rcmaps)
            fp.close()
            ui.readconfig(repo.join('hgrc'))
        kwmaps = dict(ui.configitems('keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_('\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_('\tdisabling current template maps\n'))
            for k, v in kwmaps.iteritems():
                ui.setconfig('keywordmaps', k, v)
    else:
        ui.status(_('\n\tconfiguration using current keyword template maps\n'))
        if uikwmaps:
            kwmaps = dict(uikwmaps)
        else:
            kwmaps = _defaultkwmaps(ui)

    uisetup(ui)
    reposetup(ui, repo)
    ui.write('[extensions]\nkeyword =\n')
    demoitems('keyword', ui.configitems('keyword'))
    demoitems('keywordset', ui.configitems('keywordset'))
    demoitems('keywordmaps', kwmaps.iteritems())
    keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
    repo.wopener.write(fn, keywords)
    repo[None].add([fn])
    ui.note(_('\nkeywords written to %s:\n') % fn)
    ui.note(keywords)
    repo.dirstate.setbranch('demobranch')
    for name, cmd in ui.configitems('hooks'):
        if name.split('.', 1)[0].find('commit') > -1:
            repo.ui.setconfig('hooks', name, '')
    msg = _('hg keyword configuration and expansion example')
    ui.note("hg ci -m '%s'\n" % msg) # check-code-ignore
    repo.commit(text=msg)
    ui.status(_('\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    shutil.rmtree(tmpdir, ignore_errors=True)

@command('kwexpand', commands.walkopts, _('hg kwexpand [OPTION]... [FILE]...'))
def expand(ui, repo, *pats, **opts):
    '''expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to True
    _kwfwrite(ui, repo, True, *pats, **opts)

@command('kwfiles',
         [('A', 'all', None, _('show keyword status flags of all files')),
          ('i', 'ignore', None, _('show files excluded from expansion')),
          ('u', 'unknown', None, _('only show unknown (not tracked) files')),
         ] + commands.walkopts,
         _('hg kwfiles [OPTION]... [FILE]...'))
def files(ui, repo, *pats, **opts):
    '''show files configured for keyword expansion

    List which files in the working directory are matched by the
    [keyword] configuration patterns.

    Useful to prevent inadvertent keyword expansion and to speed up
    execution by including only files that are actual candidates for
    expansion.

    See :hg:`help keyword` on how to construct patterns both for
    inclusion and exclusion of files.

    With -A/--all and -v/--verbose the codes used to show the status
    of files are::

      K = keyword expansion candidate
      k = keyword expansion candidate (not tracked)
      I = ignored
      i = ignored (not tracked)
    '''
    kwt = kwtools['templater']
    wctx = repo[None]
    status = _status(ui, repo, wctx, kwt, *pats, **opts)
    cwd = pats and repo.getcwd() or ''
    modified, added, removed, deleted, unknown, ignored, clean = status
    files = []
    if not opts.get('unknown') or opts.get('all'):
        files = sorted(modified + added + clean)
    kwfiles = kwt.iskwfile(files, wctx)
    kwdeleted = kwt.iskwfile(deleted, wctx)
    kwunknown = kwt.iskwfile(unknown, wctx)
    if not opts.get('ignore') or opts.get('all'):
        showfiles = kwfiles, kwdeleted, kwunknown
    else:
        showfiles = [], [], []
    if opts.get('all') or opts.get('ignore'):
        showfiles += ([f for f in files if f not in kwfiles],
                      [f for f in unknown if f not in kwunknown])
    kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
    kwstates = zip(kwlabels, 'K!kIi', showfiles)
    fm = ui.formatter('kwfiles', opts)
    fmt = '%.0s%s\n'
    if opts.get('all') or ui.verbose:
        fmt = '%s %s\n'
    for kwstate, char, filenames in kwstates:
        label = 'kwfiles.' + kwstate
        for f in filenames:
            fm.startitem()
            fm.write('kwstatus path', fmt, char,
                     repo.pathto(f, cwd), label=label)
    fm.end()

@command('kwshrink', commands.walkopts, _('hg kwshrink [OPTION]... [FILE]...'))
def shrink(ui, repo, *pats, **opts):
    '''revert expanded keywords in the working directory

    Must be run before changing/disabling active keywords.

    kwshrink refuses to run if given files contain local changes.
    '''
    # 3rd argument sets expansion to False
    _kwfwrite(ui, repo, False, *pats, **opts)


def uisetup(ui):
    ''' Monkeypatches dispatch._parse to retrieve user command.'''

    def kwdispatch_parse(orig, ui, args):
        '''Monkeypatch dispatch._parse to obtain running hg command.'''
        cmd, func, args, options, cmdoptions = orig(ui, args)
        kwtools['hgcmd'] = cmd
        return cmd, func, args, options, cmdoptions

    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)

def reposetup(ui, repo):
    '''Sets up repo as kwrepo for keyword substitution.
    Overrides file method to return kwfilelog instead of filelog
    if file matches user configuration.
    Wraps commit to overwrite configured files with updated
    keyword substitutions.
    Monkeypatches patch and webcommands.'''

    try:
        if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
            or '.hg' in util.splitpath(repo.root)
            or repo._url.startswith('bundle:')):
            return
    except AttributeError:
        pass

    inc, exc = [], ['.hg*']
    for pat, opt in ui.configitems('keyword'):
        if opt != 'ignore':
            inc.append(pat)
        else:
            exc.append(pat)
    if not inc:
        return

    kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)

    class kwrepo(repo.__class__):
        def file(self, f):
            if f[0] == '/':
                f = f[1:]
            return kwfilelog(self.sopener, kwt, f)

        def wread(self, filename):
            data = super(kwrepo, self).wread(filename)
            return kwt.wread(filename, data)

        def commit(self, *args, **opts):
            # use custom commitctx for user commands
            # other extensions can still wrap repo.commitctx directly
            self.commitctx = self.kwcommitctx
            try:
                return super(kwrepo, self).commit(*args, **opts)
            finally:
                del self.commitctx

        def kwcommitctx(self, ctx, error=False):
            n = super(kwrepo, self).commitctx(ctx, error)
            # no lock needed, only called from repo.commit() which already locks
            if not kwt.postcommit:
                restrict = kwt.restrict
                kwt.restrict = True
                kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
                              False, True)
                kwt.restrict = restrict
            return n

        def rollback(self, dryrun=False, force=False):
            wlock = self.wlock()
            try:
                if not dryrun:
                    changed = self['.'].files()
                ret = super(kwrepo, self).rollback(dryrun, force)
                if not dryrun:
                    ctx = self['.']
                    modified, added = _preselect(self[None].status(), changed)
                    kwt.overwrite(ctx, modified, True, True)
                    kwt.overwrite(ctx, added, True, False)
                return ret
            finally:
                wlock.release()

    # monkeypatches
    def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
        '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
        rejects or conflicts due to expanded keywords in working dir.'''
        orig(self, ui, gp, backend, store, eolmode)
        # shrink keywords read from working dir
        self.lines = kwt.shrinklines(self.fname, self.lines)

    def kw_diff(orig, repo, node1=None, node2=None, match=None, changes=None,
                opts=None, prefix=''):
        '''Monkeypatch patch.diff to avoid expansion.'''
        kwt.restrict = True
        return orig(repo, node1, node2, match, changes, opts, prefix)

    def kwweb_skip(orig, web, req, tmpl):
        '''Wraps webcommands.x turning off keyword expansion.'''
        kwt.match = util.never
        return orig(web, req, tmpl)

    def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts):
        '''Wraps cmdutil.amend expanding keywords after amend.'''
        wlock = repo.wlock()
        try:
            kwt.postcommit = True
            newid = orig(ui, repo, commitfunc, old, extra, pats, opts)
            if newid != old.node():
                ctx = repo[newid]
                kwt.restrict = True
                kwt.overwrite(ctx, ctx.files(), False, True)
                kwt.restrict = False
            return newid
        finally:
            wlock.release()

    def kw_copy(orig, ui, repo, pats, opts, rename=False):
        '''Wraps cmdutil.copy so that copy/rename destinations do not
        contain expanded keywords.
        Note that the source of a regular file destination may also be a
        symlink:
        hg cp sym x                -> x is symlink
        cp sym x; hg cp -A sym x   -> x is file (maybe expanded keywords)
        For the latter we have to follow the symlink to find out whether its
        target is configured for expansion and we therefore must unexpand the
        keywords in the destination.'''
        wlock = repo.wlock()
        try:
            orig(ui, repo, pats, opts, rename)
            if opts.get('dry_run'):
                return
            wctx = repo[None]
            cwd = repo.getcwd()

            def haskwsource(dest):
                '''Returns true if dest is a regular file and configured for
                expansion or a symlink which points to a file configured for
                expansion. '''
                source = repo.dirstate.copied(dest)
                if 'l' in wctx.flags(source):
-                    source = scmutil.canonpath(repo.root, cwd,
+                    source = pathutil.canonpath(repo.root, cwd,
                                                os.path.realpath(source))
                return kwt.match(source)

            candidates = [f for f in repo.dirstate.copies() if
                          'l' not in wctx.flags(f) and haskwsource(f)]
            kwt.overwrite(wctx, candidates, False, False)
        finally:
            wlock.release()

    def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
        '''Wraps record.dorecord expanding keywords after recording.'''
        wlock = repo.wlock()
        try:
            # record returns 0 even when nothing has changed
            # therefore compare nodes before and after
            kwt.postcommit = True
            ctx = repo['.']
            wstatus = repo[None].status()
            ret = orig(ui, repo, commitfunc, *pats, **opts)
            recctx = repo['.']
            if ctx != recctx:
                modified, added = _preselect(wstatus, recctx.files())
                kwt.restrict = False
                kwt.overwrite(recctx, modified, False, True)
                kwt.overwrite(recctx, added, False, True, True)
                kwt.restrict = True
            return ret
        finally:
            wlock.release()

    def kwfilectx_cmp(orig, self, fctx):
        # keyword affects data size, comparing wdir and filelog size does
        # not make sense
        if (fctx._filerev is None and
            (self._repo._encodefilterpats or
             kwt.match(fctx.path()) and 'l' not in fctx.flags() or
             self.size() - 4 == fctx.size()) or
            self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())
        return True

    extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
    extensions.wrapfunction(patch, 'diff', kw_diff)
    extensions.wrapfunction(cmdutil, 'amend', kw_amend)
    extensions.wrapfunction(cmdutil, 'copy', kw_copy)
    for c in 'annotate changeset rev filediff diff'.split():
        extensions.wrapfunction(webcommands, c, kwweb_skip)
    for name in recordextensions.split():
        try:
            record = extensions.find(name)
            extensions.wrapfunction(record, 'dorecord', kw_dorecord)
        except KeyError:
            pass

    repo.__class__ = kwrepo
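
keyword.py's reposetup() relies throughout on Mercurial's
extensions.wrapfunction helper, which replaces a module attribute with a
wrapper that receives the original callable as its first argument. A small
illustrative sketch of that pattern (not part of the commit; wrapping
util.pconvert is an arbitrary choice, and it assumes this vintage of
Mercurial is importable):

# Sketch of the extensions.wrapfunction pattern used above.
from mercurial import extensions, util

def logging_pconvert(orig, path):
    result = orig(path)   # delegate to the original function
    print 'pconvert(%r) -> %r' % (path, result)
    return result

extensions.wrapfunction(util, 'pconvert', logging_pconvert)
util.pconvert('a\\b')     # wrapper logs, then returns the original's result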
@@ -1,1218 +1,1218 @@ hgext/largefiles/overrides.py
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10
11 import os
12 import copy
13
14 from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
15 node, archival, error, merge, discovery
15 node, archival, error, merge, discovery, pathutil
16 from mercurial.i18n import _
17 from mercurial.node import hex
18 from hgext import rebase
19
20 import lfutil
21 import lfcommands
22 import basestore
23
24 # -- Utility functions: commonly/repeatedly needed functionality ---------------
25
26 def installnormalfilesmatchfn(manifest):
27 '''overrides scmutil.match so that the matcher it returns will ignore all
28 largefiles'''
29 oldmatch = None # for the closure
30 def overridematch(ctx, pats=[], opts={}, globbed=False,
31 default='relpath'):
32 match = oldmatch(ctx, pats, opts, globbed, default)
33 m = copy.copy(match)
34 notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
35 manifest)
36 m._files = filter(notlfile, m._files)
37 m._fmap = set(m._files)
38 m._always = False
39 origmatchfn = m.matchfn
40 m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
41 return m
42 oldmatch = installmatchfn(overridematch)
43
44 def installmatchfn(f):
45 oldmatch = scmutil.match
46 setattr(f, 'oldmatch', oldmatch)
47 scmutil.match = f
48 return oldmatch
49
50 def restorematchfn():
51 '''restores scmutil.match to what it was before installnormalfilesmatchfn
52 was called. no-op if scmutil.match is its original function.
53
54 Note that n calls to installnormalfilesmatchfn will require n calls to
55 restore matchfn to reverse'''
56 scmutil.match = getattr(scmutil.match, 'oldmatch', scmutil.match)
57
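[Editor's note] installmatchfn chains replacements by stashing the currently installed function on the new one as an 'oldmatch' attribute, so restorematchfn pops exactly one level per call. The same idiom reduced to plain functions (hypothetical names, standard Python only):

    def base():
        return 'base'

    current = [base]            # stand-in for the scmutil.match slot

    def install(replacement):
        # like installmatchfn: remember what was installed before
        replacement.oldmatch = current[0]
        current[0] = replacement

    def restore():
        # like restorematchfn: pop one level; a no-op at the bottom
        current[0] = getattr(current[0], 'oldmatch', current[0])

    def override():
        return 'override(' + override.oldmatch() + ')'

    install(override)
    assert current[0]() == 'override(base)'
    restore()
    assert current[0] is base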
58 def addlargefiles(ui, repo, *pats, **opts):
59 large = opts.pop('large', None)
60 lfsize = lfutil.getminsize(
61 ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))
62
63 lfmatcher = None
64 if lfutil.islfilesrepo(repo):
65 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
66 if lfpats:
67 lfmatcher = match_.match(repo.root, '', list(lfpats))
68
69 lfnames = []
70 m = scmutil.match(repo[None], pats, opts)
71 m.bad = lambda x, y: None
72 wctx = repo[None]
73 for f in repo.walk(m):
74 exact = m.exact(f)
75 lfile = lfutil.standin(f) in wctx
76 nfile = f in wctx
77 exists = lfile or nfile
78
79 # Don't warn the user when they attempt to add a normal tracked file.
80 # The normal add code will do that for us.
81 if exact and exists:
82 if lfile:
83 ui.warn(_('%s already a largefile\n') % f)
84 continue
85
86 if (exact or not exists) and not lfutil.isstandin(f):
87 wfile = repo.wjoin(f)
88
89 # In case the file was removed previously, but not committed
90 # (issue3507)
91 if not os.path.exists(wfile):
92 continue
93
94 abovemin = (lfsize and
95 os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
96 if large or abovemin or (lfmatcher and lfmatcher(f)):
97 lfnames.append(f)
98 if ui.verbose or not exact:
99 ui.status(_('adding %s as a largefile\n') % m.rel(f))
100
101 bad = []
102 standins = []
103
104 # Need to lock, otherwise there could be a race condition between
105 # when standins are created and added to the repo.
106 wlock = repo.wlock()
107 try:
108 if not opts.get('dry_run'):
109 lfdirstate = lfutil.openlfdirstate(ui, repo)
110 for f in lfnames:
111 standinname = lfutil.standin(f)
112 lfutil.writestandin(repo, standinname, hash='',
113 executable=lfutil.getexecutable(repo.wjoin(f)))
114 standins.append(standinname)
115 if lfdirstate[f] == 'r':
116 lfdirstate.normallookup(f)
117 else:
118 lfdirstate.add(f)
119 lfdirstate.write()
120 bad += [lfutil.splitstandin(f)
121 for f in repo[None].add(standins)
122 if f in m.files()]
123 finally:
124 wlock.release()
125 return bad
126
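[Editor's note] A file becomes a largefile when --large is passed explicitly, when it meets the configured minimum size (lfsize is in megabytes, hence the * 1024 * 1024), or when it matches a configured pattern. A self-contained sketch of that predicate (hypothetical names, standard library only):

    import os

    def islargecandidate(path, force_large, minsize_mb, matcher=None):
        # mirrors the decision above: explicit flag, size threshold
        # (megabytes converted to bytes), or a configured pattern match
        abovemin = (minsize_mb and
                    os.lstat(path).st_size >= minsize_mb * 1024 * 1024)
        return bool(force_large or abovemin or (matcher and matcher(path)))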
127 def removelargefiles(ui, repo, *pats, **opts):
128 after = opts.get('after')
129 if not pats and not after:
130 raise util.Abort(_('no files specified'))
131 m = scmutil.match(repo[None], pats, opts)
132 try:
133 repo.lfstatus = True
134 s = repo.status(match=m, clean=True)
135 finally:
136 repo.lfstatus = False
137 manifest = repo[None].manifest()
138 modified, added, deleted, clean = [[f for f in list
139 if lfutil.standin(f) in manifest]
140 for list in [s[0], s[1], s[3], s[6]]]
141
142 def warn(files, msg):
143 for f in files:
144 ui.warn(msg % m.rel(f))
145 return int(len(files) > 0)
146
147 result = 0
148
149 if after:
150 remove, forget = deleted, []
151 result = warn(modified + added + clean,
152 _('not removing %s: file still exists\n'))
153 else:
154 remove, forget = deleted + clean, []
155 result = warn(modified, _('not removing %s: file is modified (use -f'
156 ' to force removal)\n'))
157 result = warn(added, _('not removing %s: file has been marked for add'
158 ' (use forget to undo)\n')) or result
159
160 for f in sorted(remove + forget):
161 if ui.verbose or not m.exact(f):
162 ui.status(_('removing %s\n') % m.rel(f))
163
164 # Need to lock because standin files are deleted then removed from the
165 # repository and we could race in-between.
166 wlock = repo.wlock()
167 try:
168 lfdirstate = lfutil.openlfdirstate(ui, repo)
169 for f in remove:
170 if not after:
171 # If this is being called by addremove, notify the user that we
172 # are removing the file.
173 if getattr(repo, "_isaddremove", False):
174 ui.status(_('removing %s\n') % f)
175 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
176 lfdirstate.remove(f)
177 lfdirstate.write()
178 forget = [lfutil.standin(f) for f in forget]
179 remove = [lfutil.standin(f) for f in remove]
180 repo[None].forget(forget)
181 # If this is being called by addremove, let the original addremove
182 # function handle this.
183 if not getattr(repo, "_isaddremove", False):
184 for f in remove:
185 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
186 repo[None].forget(remove)
187 finally:
188 wlock.release()
189
190 return result
191
192 # For overriding mercurial.hgweb.webcommands so that largefiles will
193 # appear at their right place in the manifests.
194 def decodepath(orig, path):
195 return lfutil.splitstandin(path) or path
196
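[Editor's note] A standin is a small tracked placeholder kept under the standin directory (lfutil.shortname, conventionally '.hglf'), so splitstandin strips that prefix to recover the user-visible path. A sketch under that assumption, not the extension's actual implementation:

    # assumes the conventional '.hglf/' standin prefix used by largefiles
    SHORTNAME = '.hglf'

    def splitstandin(path):
        # '.hglf/dir/big.bin' -> 'dir/big.bin'; None if not a standin
        if path.startswith(SHORTNAME + '/'):
            return path[len(SHORTNAME) + 1:]
        return None

    def decodepath(path):
        # as above: show standins at their user-visible location
        return splitstandin(path) or path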
197 # -- Wrappers: modify existing commands --------------------------------
198
199 # Add works by going through the files that the user wanted to add and
200 # checking if they should be added as largefiles. Then it makes a new
201 # matcher which matches only the normal files and runs the original
202 # version of add.
203 def overrideadd(orig, ui, repo, *pats, **opts):
204 normal = opts.pop('normal')
205 if normal:
206 if opts.get('large'):
207 raise util.Abort(_('--normal cannot be used with --large'))
208 return orig(ui, repo, *pats, **opts)
209 bad = addlargefiles(ui, repo, *pats, **opts)
210 installnormalfilesmatchfn(repo[None].manifest())
211 result = orig(ui, repo, *pats, **opts)
212 restorematchfn()
213
214 return (result == 1 or bad) and 1 or 0
215
216 def overrideremove(orig, ui, repo, *pats, **opts):
217 installnormalfilesmatchfn(repo[None].manifest())
218 result = orig(ui, repo, *pats, **opts)
219 restorematchfn()
220 return removelargefiles(ui, repo, *pats, **opts) or result
221
222 def overridestatusfn(orig, repo, rev2, **opts):
223 try:
224 repo._repo.lfstatus = True
225 return orig(repo, rev2, **opts)
226 finally:
227 repo._repo.lfstatus = False
228
229 def overridestatus(orig, ui, repo, *pats, **opts):
230 try:
231 repo.lfstatus = True
232 return orig(ui, repo, *pats, **opts)
233 finally:
234 repo.lfstatus = False
235
236 def overridedirty(orig, repo, ignoreupdate=False):
237 try:
238 repo._repo.lfstatus = True
239 return orig(repo, ignoreupdate)
240 finally:
241 repo._repo.lfstatus = False
242
243 def overridelog(orig, ui, repo, *pats, **opts):
244 def overridematch(ctx, pats=[], opts={}, globbed=False,
245 default='relpath'):
246 """Matcher that merges root directory with .hglf, suitable for log.
247 It is still possible to match .hglf directly.
248 For any listed files run log on the standin too.
249 matchfn tries both the given filename and with .hglf stripped.
250 """
251 match = oldmatch(ctx, pats, opts, globbed, default)
252 m = copy.copy(match)
253 for i in range(0, len(m._files)):
254 standin = lfutil.standin(m._files[i])
255 if standin in repo[ctx.node()]:
256 m._files[i] = standin
257 m._fmap = set(m._files)
258 m._always = False
259 origmatchfn = m.matchfn
260 def lfmatchfn(f):
261 lf = lfutil.splitstandin(f)
262 if lf is not None and origmatchfn(lf):
263 return True
264 r = origmatchfn(f)
265 return r
266 m.matchfn = lfmatchfn
267 return m
268 oldmatch = installmatchfn(overridematch)
269 try:
270 repo.lfstatus = True
271 return orig(ui, repo, *pats, **opts)
272 finally:
273 repo.lfstatus = False
274 restorematchfn()
275
276 def overrideverify(orig, ui, repo, *pats, **opts):
277 large = opts.pop('large', False)
278 all = opts.pop('lfa', False)
279 contents = opts.pop('lfc', False)
280
281 result = orig(ui, repo, *pats, **opts)
282 if large or all or contents:
283 result = result or lfcommands.verifylfiles(ui, repo, all, contents)
284 return result
285
286 def overridedebugstate(orig, ui, repo, *pats, **opts):
287 large = opts.pop('large', False)
288 if large:
289 lfcommands.debugdirstate(ui, repo)
290 else:
291 orig(ui, repo, *pats, **opts)
292
293 # Override needs to refresh standins so that update's normal merge
294 # will go through properly. Then the other update hook (overriding repo.update)
295 # will get the new files. Filemerge is also overridden so that the merge
296 # will merge standins correctly.
297 def overrideupdate(orig, ui, repo, *pats, **opts):
298 lfdirstate = lfutil.openlfdirstate(ui, repo)
299 s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
300 False, False)
301 (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
302
303 # Need to lock between the standins getting updated and their
304 # largefiles getting updated
305 wlock = repo.wlock()
306 try:
307 if opts['check']:
308 mod = len(modified) > 0
309 for lfile in unsure:
310 standin = lfutil.standin(lfile)
311 if repo['.'][standin].data().strip() != \
312 lfutil.hashfile(repo.wjoin(lfile)):
313 mod = True
314 else:
315 lfdirstate.normal(lfile)
316 lfdirstate.write()
317 if mod:
318 raise util.Abort(_('uncommitted changes'))
319 # XXX handle removed differently
320 if not opts['clean']:
321 for lfile in unsure + modified + added:
322 lfutil.updatestandin(repo, lfutil.standin(lfile))
323 finally:
324 wlock.release()
325 return orig(ui, repo, *pats, **opts)
326
327 # Before starting the manifest merge, merge.updates will call
328 # _checkunknown to check if there are any files in the merged-in
329 # changeset that collide with unknown files in the working copy.
330 #
331 # The largefiles are seen as unknown, so this prevents us from merging
332 # in a file 'foo' if we already have a largefile with the same name.
333 #
334 # The overridden function filters the unknown files by removing any
335 # largefiles. This makes the merge proceed and we can then handle this
336 # case further in the overridden manifestmerge function below.
337 def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
338 if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
339 return False
340 return origfn(repo, wctx, mctx, f)
341
342 # The manifest merge handles conflicts on the manifest level. We want
343 # to handle changes in largefile-ness of files at this level too.
344 #
345 # The strategy is to run the original manifestmerge and then process
346 # the action list it outputs. There are two cases we need to deal with:
347 #
348 # 1. Normal file in p1, largefile in p2. Here the largefile is
349 # detected via its standin file, which will enter the working copy
350 # with a "get" action. It is not "merge" since the standin is all
351 # Mercurial is concerned with at this level -- the link to the
352 # existing normal file is not relevant here.
353 #
354 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
355 # since the largefile will be present in the working copy and
356 # different from the normal file in p2. Mercurial therefore
357 # triggers a merge action.
358 #
359 # In both cases, we prompt the user and emit new actions to either
360 # remove the standin (if the normal file was kept) or to remove the
361 # normal file and get the standin (if the largefile was kept). The
362 # default prompt answer is to use the largefile version since it was
363 # presumably changed on purpose.
364 #
365 # Finally, the merge.applyupdates function will then take care of
366 # writing the files into the working copy and lfcommands.updatelfiles
367 # will update the largefiles.
368 def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
369 partial, acceptremote=False):
370 overwrite = force and not branchmerge
371 actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
372 acceptremote)
373
374 if overwrite:
375 return actions
376
377 removes = set(a[0] for a in actions if a[1] == 'r')
378 processed = []
379
380 for action in actions:
381 f, m, args, msg = action
382
383 splitstandin = lfutil.splitstandin(f)
384 if (m == "g" and splitstandin is not None and
385 splitstandin in p1 and splitstandin not in removes):
386 # Case 1: normal file in the working copy, largefile in
387 # the second parent
388 lfile = splitstandin
389 standin = f
390 msg = _('remote turned local normal file %s into a largefile\n'
391 'use (l)argefile or keep (n)ormal file?'
392 '$$ &Largefile $$ &Normal file') % lfile
393 if repo.ui.promptchoice(msg, 0) == 0:
394 processed.append((lfile, "r", None, msg))
395 processed.append((standin, "g", (p2.flags(standin),), msg))
396 else:
397 processed.append((standin, "r", None, msg))
398 elif (m == "g" and
399 lfutil.standin(f) in p1 and lfutil.standin(f) not in removes):
400 # Case 2: largefile in the working copy, normal file in
401 # the second parent
402 standin = lfutil.standin(f)
403 lfile = f
404 msg = _('remote turned local largefile %s into a normal file\n'
405 'keep (l)argefile or use (n)ormal file?'
406 '$$ &Largefile $$ &Normal file') % lfile
407 if repo.ui.promptchoice(msg, 0) == 0:
408 processed.append((lfile, "r", None, msg))
409 else:
410 processed.append((standin, "r", None, msg))
411 processed.append((lfile, "g", (p2.flags(lfile),), msg))
412 else:
413 processed.append(action)
414
415 return processed
416
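[Editor's note] Each merge action is a (file, action-code, args, message) tuple, where 'g' means get the file from the other parent and 'r' means remove it. A reduced sketch of the rewrite loop above on plain tuples, with the prompt and parent-manifest checks omitted (hypothetical data, not the Mercurial API):

    # hypothetical action list: (file, code, args, msg); 'g' = get, 'r' = remove
    actions = [('a.txt', 'g', (), ''), ('.hglf/big.bin', 'g', (), '')]

    def rewrite(actions, keep_largefile=True):
        processed = []
        for f, code, args, msg in actions:
            if code == 'g' and f.startswith('.hglf/'):
                lfile = f[len('.hglf/'):]
                if keep_largefile:
                    # drop the normal file, fetch the standin
                    processed.append((lfile, 'r', None, msg))
                    processed.append((f, 'g', args, msg))
                else:
                    processed.append((f, 'r', None, msg))
            else:
                processed.append((f, code, args, msg))
        return processed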
417 # Override filemerge to prompt the user about how they wish to merge
418 # largefiles. This will handle identical edits, and copy/rename +
419 # edit without prompting the user.
420 def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
421 # Use better variable names here. Because this is a wrapper we cannot
422 # change the variable names in the function declaration.
423 fcdest, fcother, fcancestor = fcd, fco, fca
424 if not lfutil.isstandin(orig):
425 return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
426 else:
427 if not fcother.cmp(fcdest): # files identical?
428 return None
429
430 # backwards, use working dir parent as ancestor
431 if fcancestor == fcother:
432 fcancestor = fcdest.parents()[0]
433
434 if orig != fcother.path():
435 repo.ui.status(_('merging %s and %s to %s\n')
436 % (lfutil.splitstandin(orig),
437 lfutil.splitstandin(fcother.path()),
438 lfutil.splitstandin(fcdest.path())))
439 else:
440 repo.ui.status(_('merging %s\n')
441 % lfutil.splitstandin(fcdest.path()))
442
443 if fcancestor.path() != fcother.path() and fcother.data() == \
444 fcancestor.data():
445 return 0
446 if fcancestor.path() != fcdest.path() and fcdest.data() == \
447 fcancestor.data():
448 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
449 return 0
450
451 if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
452 'keep (l)ocal or take (o)ther?'
453 '$$ &Local $$ &Other') %
454 lfutil.splitstandin(orig), 0) == 0:
455 return 0
456 else:
457 repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
458 return 0
459
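[Editor's note] As I read the ui.promptchoice convention used above, the question and its responses are packed into one string separated by '$$', '&' marks each response's hotkey, and the integer argument is the index of the default answer. A tiny runnable illustration of pulling the pieces apart (the prompt text is from the code above):

    prompt = 'keep (l)ocal or take (o)ther?$$ &Local $$ &Other'
    parts = prompt.split('$$')
    question = parts[0]
    responses = [r.strip() for r in parts[1:]]
    # '&' marks each response's keyboard shortcut
    hotkeys = [r[r.index('&') + 1].lower() for r in responses]
    assert hotkeys == ['l', 'o']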
460 # Copy first changes the matchers to match standins instead of
461 # largefiles. Then it overrides util.copyfile; in that override it
462 # checks if the destination largefile already exists. It also keeps a
463 # list of copied files so that the largefiles can be copied and the
464 # dirstate updated.
465 def overridecopy(orig, ui, repo, pats, opts, rename=False):
466 # doesn't remove largefile on rename
467 if len(pats) < 2:
468 # this isn't legal, let the original function deal with it
469 return orig(ui, repo, pats, opts, rename)
470
471 def makestandin(relpath):
472 path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
472 path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
473 return os.path.join(repo.wjoin(lfutil.standin(path)))
474
475 fullpats = scmutil.expandpats(pats)
476 dest = fullpats[-1]
477
478 if os.path.isdir(dest):
479 if not os.path.isdir(makestandin(dest)):
480 os.makedirs(makestandin(dest))
481 # This could copy both lfiles and normal files in one command,
482 # but we don't want to do that. First replace their matcher to
483 # only match normal files and run it, then replace it to just
484 # match largefiles and run it again.
485 nonormalfiles = False
486 nolfiles = False
487 try:
488 try:
489 installnormalfilesmatchfn(repo[None].manifest())
490 result = orig(ui, repo, pats, opts, rename)
491 except util.Abort, e:
492 if str(e) != _('no files to copy'):
493 raise e
494 else:
495 nonormalfiles = True
496 result = 0
497 finally:
498 restorematchfn()
499
500 # The first rename can cause our current working directory to be removed.
501 # In that case there is nothing left to copy/rename so just quit.
502 try:
503 repo.getcwd()
504 except OSError:
505 return result
506
507 try:
508 try:
509 # When we call orig below it creates the standins but we don't add
510 # them to the dir state until later so lock during that time.
511 wlock = repo.wlock()
512
513 manifest = repo[None].manifest()
514 oldmatch = None # for the closure
515 def overridematch(ctx, pats=[], opts={}, globbed=False,
516 default='relpath'):
517 newpats = []
518 # The patterns were previously mangled to add the standin
519 # directory; we need to remove that now
520 for pat in pats:
521 if match_.patkind(pat) is None and lfutil.shortname in pat:
522 newpats.append(pat.replace(lfutil.shortname, ''))
523 else:
524 newpats.append(pat)
525 match = oldmatch(ctx, newpats, opts, globbed, default)
526 m = copy.copy(match)
527 lfile = lambda f: lfutil.standin(f) in manifest
528 m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
529 m._fmap = set(m._files)
530 m._always = False
531 origmatchfn = m.matchfn
532 m.matchfn = lambda f: (lfutil.isstandin(f) and
533 (f in manifest) and
534 origmatchfn(lfutil.splitstandin(f)) or
535 None)
536 return m
537 oldmatch = installmatchfn(overridematch)
538 listpats = []
539 for pat in pats:
540 if match_.patkind(pat) is not None:
541 listpats.append(pat)
542 else:
543 listpats.append(makestandin(pat))
544
545 try:
546 origcopyfile = util.copyfile
547 copiedfiles = []
548 def overridecopyfile(src, dest):
549 if (lfutil.shortname in src and
550 dest.startswith(repo.wjoin(lfutil.shortname))):
551 destlfile = dest.replace(lfutil.shortname, '')
552 if not opts['force'] and os.path.exists(destlfile):
553 raise IOError('',
554 _('destination largefile already exists'))
555 copiedfiles.append((src, dest))
556 origcopyfile(src, dest)
557
558 util.copyfile = overridecopyfile
559 result += orig(ui, repo, listpats, opts, rename)
560 finally:
561 util.copyfile = origcopyfile
562
563 lfdirstate = lfutil.openlfdirstate(ui, repo)
564 for (src, dest) in copiedfiles:
565 if (lfutil.shortname in src and
566 dest.startswith(repo.wjoin(lfutil.shortname))):
567 srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
568 destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
569 destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
570 if not os.path.isdir(destlfiledir):
571 os.makedirs(destlfiledir)
572 if rename:
573 os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
574 lfdirstate.remove(srclfile)
575 else:
576 util.copyfile(repo.wjoin(srclfile),
577 repo.wjoin(destlfile))
578
579 lfdirstate.add(destlfile)
580 lfdirstate.write()
581 except util.Abort, e:
582 if str(e) != _('no files to copy'):
583 raise e
584 else:
585 nolfiles = True
586 finally:
587 restorematchfn()
588 wlock.release()
589
590 if nolfiles and nonormalfiles:
591 raise util.Abort(_('no files to copy'))
592
593 return result
594
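[Editor's note] The copy override temporarily swaps util.copyfile for an instrumented version and restores it in a finally block, so the monkeypatch cannot leak past the call. The same pattern reduced to the standard library (names hypothetical):

    import shutil

    origcopyfile = shutil.copyfile
    copied = []

    def recordingcopyfile(src, dst, **kwargs):
        # record the pair, then delegate to the original implementation
        copied.append((src, dst))
        return origcopyfile(src, dst, **kwargs)

    shutil.copyfile = recordingcopyfile
    try:
        pass  # code that copies files goes here
    finally:
        shutil.copyfile = origcopyfile  # always undo the patch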
595 # When the user calls revert, we have to be careful to not revert any
596 # changes to other largefiles accidentally. This means we have to keep
597 # track of the largefiles that are being reverted so we only pull down
598 # the necessary largefiles.
599 #
600 # Standins are only updated (to match the hash of largefiles) before
601 # commits. Update the standins then run the original revert, changing
602 # the matcher to hit standins instead of largefiles. Based on the
603 # resulting standins update the largefiles. Then return the standins
604 # to their proper state
605 def overriderevert(orig, ui, repo, *pats, **opts):
606 # Because we put the standins in a bad state (by updating them)
607 # and then return them to a correct state we need to lock to
608 # prevent others from changing them in their incorrect state.
609 wlock = repo.wlock()
610 try:
611 lfdirstate = lfutil.openlfdirstate(ui, repo)
612 (modified, added, removed, missing, unknown, ignored, clean) = \
613 lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
614 lfdirstate.write()
615 for lfile in modified:
616 lfutil.updatestandin(repo, lfutil.standin(lfile))
617 for lfile in missing:
618 if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
619 os.unlink(repo.wjoin(lfutil.standin(lfile)))
620
621 try:
622 ctx = scmutil.revsingle(repo, opts.get('rev'))
623 oldmatch = None # for the closure
624 def overridematch(ctx, pats=[], opts={}, globbed=False,
625 default='relpath'):
626 match = oldmatch(ctx, pats, opts, globbed, default)
627 m = copy.copy(match)
628 def tostandin(f):
629 if lfutil.standin(f) in ctx:
630 return lfutil.standin(f)
631 elif lfutil.standin(f) in repo[None]:
632 return None
633 return f
634 m._files = [tostandin(f) for f in m._files]
635 m._files = [f for f in m._files if f is not None]
636 m._fmap = set(m._files)
637 m._always = False
638 origmatchfn = m.matchfn
639 def matchfn(f):
640 if lfutil.isstandin(f):
641 # We need to keep track of what largefiles are being
642 # matched so we know which ones to update later --
643 # otherwise we accidentally revert changes to other
644 # largefiles. This is repo-specific, so duckpunch the
645 # repo object to keep the list of largefiles for us
646 # later.
647 if origmatchfn(lfutil.splitstandin(f)) and \
648 (f in repo[None] or f in ctx):
649 lfileslist = getattr(repo, '_lfilestoupdate', [])
650 lfileslist.append(lfutil.splitstandin(f))
651 repo._lfilestoupdate = lfileslist
652 return True
653 else:
654 return False
655 return origmatchfn(f)
656 m.matchfn = matchfn
657 return m
658 oldmatch = installmatchfn(overridematch)
659 scmutil.match
660 matches = overridematch(repo[None], pats, opts)
661 orig(ui, repo, *pats, **opts)
662 finally:
663 restorematchfn()
664 lfileslist = getattr(repo, '_lfilestoupdate', [])
665 lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
666 printmessage=False)
667
668 # empty out the largefiles list so we start fresh next time
669 repo._lfilestoupdate = []
670 for lfile in modified:
671 if lfile in lfileslist:
672 if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
673 in repo['.']:
674 lfutil.writestandin(repo, lfutil.standin(lfile),
675 repo['.'][lfile].data().strip(),
676 'x' in repo['.'][lfile].flags())
677 lfdirstate = lfutil.openlfdirstate(ui, repo)
678 for lfile in added:
679 standin = lfutil.standin(lfile)
680 if standin not in ctx and (standin in matches or opts.get('all')):
681 if lfile in lfdirstate:
682 lfdirstate.drop(lfile)
683 util.unlinkpath(repo.wjoin(standin))
684 lfdirstate.write()
685 finally:
686 wlock.release()
687
688 def hgupdaterepo(orig, repo, node, overwrite):
689 if not overwrite:
690 # Only call updatelfiles on the standins that have changed to save time
691 oldstandins = lfutil.getstandinsstate(repo)
692
693 result = orig(repo, node, overwrite)
694
695 filelist = None
696 if not overwrite:
697 newstandins = lfutil.getstandinsstate(repo)
698 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
699 lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
700 return result
701
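[Editor's note] hgupdaterepo snapshots the standins before and after the update and only refreshes largefiles whose standins changed. getstandinsstate and getlfilestoupdate are not shown in this hunk; the idea amounts to a difference over path-to-hash mappings, roughly as in this sketch (assumed representation, not the actual lfutil code):

    def changedstandins(old, new):
        # old/new: dicts mapping standin path -> content hash; a largefile
        # needs refreshing when its standin appeared, vanished, or changed
        paths = set(old) | set(new)
        return sorted(p for p in paths if old.get(p) != new.get(p))

    # changedstandins({'.hglf/a': '111'}, {'.hglf/a': '222', '.hglf/b': '333'})
    # -> ['.hglf/a', '.hglf/b']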
702 def hgmerge(orig, repo, node, force=None, remind=True):
703 result = orig(repo, node, force, remind)
704 lfcommands.updatelfiles(repo.ui, repo)
705 return result
706
707 # When we rebase a repository with remotely changed largefiles, we need to
708 # take some extra care so that the largefiles are correctly updated in the
709 # working copy
710 def overridepull(orig, ui, repo, source=None, **opts):
711 revsprepull = len(repo)
712 if not source:
713 source = 'default'
714 repo.lfpullsource = source
715 if opts.get('rebase', False):
716 repo._isrebasing = True
717 try:
718 if opts.get('update'):
719 del opts['update']
720 ui.debug('--update and --rebase are not compatible, ignoring '
721 'the update flag\n')
722 del opts['rebase']
723 origpostincoming = commands.postincoming
724 def _dummy(*args, **kwargs):
725 pass
726 commands.postincoming = _dummy
727 try:
728 result = commands.pull(ui, repo, source, **opts)
729 finally:
730 commands.postincoming = origpostincoming
731 revspostpull = len(repo)
732 if revspostpull > revsprepull:
733 result = result or rebase.rebase(ui, repo)
734 finally:
735 repo._isrebasing = False
736 else:
737 result = orig(ui, repo, source, **opts)
738 revspostpull = len(repo)
739 lfrevs = opts.get('lfrev', [])
740 if opts.get('all_largefiles'):
741 lfrevs.append('pulled()')
742 if lfrevs and revspostpull > revsprepull:
743 numcached = 0
744 repo.firstpulled = revsprepull # for pulled() revset expression
745 try:
746 for rev in scmutil.revrange(repo, lfrevs):
747 ui.note(_('pulling largefiles for revision %s\n') % rev)
748 (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
749 numcached += len(cached)
750 finally:
751 del repo.firstpulled
752 ui.status(_("%d largefiles cached\n") % numcached)
753 return result
754
755 def pulledrevsetsymbol(repo, subset, x):
756 """``pulled()``
757 Changesets that have just been pulled.
758
759 Only available with largefiles from pull --lfrev expressions.
760
761 .. container:: verbose
762
763 Some examples:
764
765 - pull largefiles for all new changesets::
766
767 hg pull --lfrev "pulled()"
768
769 - pull largefiles for all new branch heads::
770
771 hg pull --lfrev "head(pulled()) and not closed()"
772
773 """
774
775 try:
776 firstpulled = repo.firstpulled
777 except AttributeError:
778 raise util.Abort(_("pulled() only available in --lfrev"))
779 return [r for r in subset if r >= firstpulled]
780
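[Editor's note] A revset symbol like pulled() becomes usable once the function is registered with the revset machinery; in largefiles that presumably happens at extension setup, outside this hunk. The predicate itself just keeps revisions at or after the count recorded in repo.firstpulled, as this small standalone check shows:

    # presumably registered at extension setup (not shown in this hunk):
    # from mercurial import revset
    # revset.symbols['pulled'] = pulledrevsetsymbol

    firstpulled = 5               # revision count recorded before the pull
    subset = range(0, 9)          # candidate revisions handed to the predicate
    assert [r for r in subset if r >= firstpulled] == [5, 6, 7, 8]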
781 def overrideclone(orig, ui, source, dest=None, **opts):
782 d = dest
783 if d is None:
784 d = hg.defaultdest(source)
785 if opts.get('all_largefiles') and not hg.islocal(d):
786 raise util.Abort(_(
787 '--all-largefiles is incompatible with non-local destination %s') %
788 d)
789
790 return orig(ui, source, dest, **opts)
791
792 def hgclone(orig, ui, opts, *args, **kwargs):
793 result = orig(ui, opts, *args, **kwargs)
794
795 if result is not None:
796 sourcerepo, destrepo = result
797 repo = destrepo.local()
798
799 # Caching is implicitly limited to 'rev' option, since the dest repo was
800 # truncated at that point. The user may expect a download count with
801 # this option, so attempt it whether or not this is a largefile repo.
802 if opts.get('all_largefiles'):
803 success, missing = lfcommands.downloadlfiles(ui, repo, None)
804
805 if missing != 0:
806 return None
807
808 return result
809
810 def overriderebase(orig, ui, repo, **opts):
811 repo._isrebasing = True
812 try:
813 return orig(ui, repo, **opts)
814 finally:
815 repo._isrebasing = False
816
817 def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
818 prefix=None, mtime=None, subrepos=None):
819 # No need to lock because we are only reading history and
820 # largefile caches, neither of which are modified.
821 lfcommands.cachelfiles(repo.ui, repo, node)
822
823 if kind not in archival.archivers:
824 raise util.Abort(_("unknown archive type '%s'") % kind)
825
826 ctx = repo[node]
827
828 if kind == 'files':
829 if prefix:
830 raise util.Abort(
831 _('cannot give prefix when archiving to files'))
832 else:
833 prefix = archival.tidyprefix(dest, kind, prefix)
834
835 def write(name, mode, islink, getdata):
836 if matchfn and not matchfn(name):
837 return
838 data = getdata()
839 if decode:
840 data = repo.wwritedata(name, data)
841 archiver.addfile(prefix + name, mode, islink, data)
842
843 archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])
844
845 if repo.ui.configbool("ui", "archivemeta", True):
846 def metadata():
847 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
848 hex(repo.changelog.node(0)), hex(node), ctx.branch())
849
850 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
851 if repo.tagtype(t) == 'global')
852 if not tags:
853 repo.ui.pushbuffer()
854 opts = {'template': '{latesttag}\n{latesttagdistance}',
855 'style': '', 'patch': None, 'git': None}
856 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
857 ltags, dist = repo.ui.popbuffer().split('\n')
858 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
859 tags += 'latesttagdistance: %s\n' % dist
860
861 return base + tags
862
863 write('.hg_archival.txt', 0644, False, metadata)
864
865 for f in ctx:
866 ff = ctx.flags(f)
867 getdata = ctx[f].data
868 if lfutil.isstandin(f):
869 path = lfutil.findfile(repo, getdata().strip())
870 if path is None:
871 raise util.Abort(
872 _('largefile %s not found in repo store or system cache')
873 % lfutil.splitstandin(f))
874 f = lfutil.splitstandin(f)
875
876 def getdatafn():
877 fd = None
878 try:
879 fd = open(path, 'rb')
880 return fd.read()
881 finally:
882 if fd:
883 fd.close()
884
885 getdata = getdatafn
886 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
887
888 if subrepos:
889 for subpath in sorted(ctx.substate):
889 for subpath in sorted(ctx.substate):
890 sub = ctx.sub(subpath)
890 sub = ctx.sub(subpath)
891 submatch = match_.narrowmatcher(subpath, matchfn)
891 submatch = match_.narrowmatcher(subpath, matchfn)
892 sub.archive(repo.ui, archiver, prefix, submatch)
892 sub.archive(repo.ui, archiver, prefix, submatch)
893
893
894 archiver.done()
894 archiver.done()
895
895
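# Illustrative sketch (hypothetical hashes): with ui.archivemeta enabled, the
# metadata() callback above yields a '.hg_archival.txt' along the lines of:
#
#   repo: 0123456789abcdef0123456789abcdef01234567
#   node: 89abcdef0123456789abcdef0123456789abcdef
#   branch: default
#   latesttag: 1.4
#   latesttagdistance: 42
#
# with 'tag: <name>' lines in place of the latesttag fields when the
# changeset carries global tags.
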
def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    lfcommands.cachelfiles(ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile
        # name, so the normal matcher works here without the lfutil variants.
        if match and not match(f):
            return
        data = getdata()

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            path = lfutil.findfile(repo._repo, getdata().strip())
            if path is None:
                raise util.Abort(
                    _('largefile %s not found in repo store or system cache')
                    % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(os.path.join(prefix, path), 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        submatch = match_.narrowmatcher(subpath, match)
        sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
                    submatch)

# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def overridebailifchanged(orig, repo):
    orig(repo)
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('uncommitted changes'))

# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
def overridefetch(orig, ui, repo, *pats, **opts):
    repo.lfstatus = True
    modified, added, removed, deleted = repo.status()[:4]
    repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('uncommitted changes'))
    return orig(ui, repo, *pats, **opts)

def overrideforget(orig, ui, repo, *pats, **opts):
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]

    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            result = 1

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(standins)
    finally:
        wlock.release()

    return result

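# Illustrative session (hypothetical names): pattern matches are echoed, so
#
#   $ hg forget 'glob:*.bin'
#   removing big.bin
#
# while an exact 'hg forget big.bin' stays quiet unless --verbose is given,
# per the 'ui.verbose or not m.exact(f)' condition above.
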
def getoutgoinglfiles(ui, repo, dest=None, **opts):
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]

    try:
        remote = hg.peer(repo, opts, dest)
    except error.RepoError:
        return None
    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
    if not outgoing.missing:
        return outgoing.missing
    o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
    if opts.get('newest_first'):
        o.reverse()

    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return sorted(toupload)

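# Note on the two-parent branch in getoutgoinglfiles() above: ctx.files()
# underreports changes for merge changesets, so the manifests are compared
# directly to collect every file that is absent from the merge result or
# whose node differs from either parent, before filtering for standins.
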
def overrideoutgoing(orig, ui, repo, dest=None, **opts):
    result = orig(ui, repo, dest, **opts)

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, dest, **opts)
        if toupload is None:
            ui.status(_('largefiles: No remote repo\n'))
        elif not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload:\n'))
            for file in toupload:
                ui.status(lfutil.splitstandin(file) + '\n')
            ui.status('\n')

    return result

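# Illustrative output (hypothetical file names) for 'hg outgoing --large':
#
#   largefiles to upload:
#   data/big.iso
#   media/video.mov
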
def overridesummary(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

    if opts.pop('large', None):
        toupload = getoutgoinglfiles(ui, repo, None, **opts)
        if toupload is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
        elif not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d to upload\n') % len(toupload))

def scmutiladdremove(orig, repo, pats=[], opts={}, dry_run=None,
                     similarity=None):
    if not lfutil.islfilesrepo(repo):
        return orig(repo, pats, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
                          False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Call into the normal remove code, but leave the removal of the standin
    # to the original addremove.  Monkey patching here makes sure we don't
    # remove the standin in the largefiles code, preventing a very confused
    # state later.
    if missing:
        m = [repo.wjoin(f) for f in missing]
        repo._isaddremove = True
        removelargefiles(repo.ui, repo, *m, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    addlargefiles(repo.ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest.  Make sure it doesn't do anything
    # with largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(repo, pats, opts, dry_run, similarity)
    restorematchfn()
    return result

# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX large file status is buggy when used on repo proxy.
    # XXX this needs to be investigated.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus

def overriderollback(orig, ui, repo, **opts):
    result = orig(ui, repo, **opts)
    merge.update(repo, node=None, branchmerge=False, force=True,
                 partial=lfutil.isstandin)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = lfutil.listlfiles(repo)
        oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
        for file in lfiles:
            if file in oldlfiles:
                lfdirstate.normallookup(file)
            else:
                lfdirstate.add(file)
        lfdirstate.write()
    finally:
        wlock.release()
    return result

def overridetransplant(orig, ui, repo, *revs, **opts):
    try:
        oldstandins = lfutil.getstandinsstate(repo)
        repo._istransplanting = True
        result = orig(ui, repo, *revs, **opts)
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=True)
    finally:
        repo._istransplanting = False
    return result

def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        lf = lfutil.splitstandin(f)
        if lf is None:
            return origmatchfn(f)
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            return origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None:
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err

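# Note on the copy loop in overridecat(): largefile contents are streamed
# through util.filechunkiter() in 128 KiB chunks, so arbitrarily large files
# are never read into memory in one piece.
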
def mercurialsinkbefore(orig, sink):
    sink.repo._isconverting = True
    orig(sink)

def mercurialsinkafter(orig, sink):
    sink.repo._isconverting = False
    orig(sink)
@@ -1,2167 +1,2167 b''
# cmdutil.py - help for command processing in mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import hex, nullid, nullrev, short
from i18n import _
import os, sys, errno, re, tempfile
import util, scmutil, templater, patch, error, templatekw, revlog, copies
import match as matchmod
import subrepo, context, repair, graphmod, revset, phases, obsolete, pathutil
import changelog
import bookmarks
import lock as lockmod

def parsealiases(cmd):
    return cmd.lstrip("^").split("|")

def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    for e in keys:
        aliases = parsealiases(e)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            for a in aliases:
                if a.startswith(cmd):
                    found = a
                    break
        if found is not None:
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugchoice[found] = (aliases, table[e])
            else:
                choice[found] = (aliases, table[e])

    if not choice and debugchoice:
        choice = debugchoice

    return choice

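# Illustrative behaviour (using the "^log|history" entry mentioned above):
# parsealiases("^log|history") yields ['log', 'history'], so
# findpossible('histo', table) resolves to 'history' by prefix match unless
# strict=True, in which case only exact alias matches count; debug commands
# are offered only when no normal command matches.
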
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = choice.keys()
        clist.sort()
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        return choice.values()[0]

    raise error.UnknownCommand(cmd)

def findrepo(p):
    while not os.path.isdir(os.path.join(p, ".hg")):
        oldp, p = p, os.path.dirname(p)
        if p == oldp:
            return None

    return p

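# Illustrative walk (hypothetical layout): findrepo('/work/repo/src/mod')
# climbs toward the root and returns '/work/repo' if that is the first
# ancestor containing '.hg'; it returns None once os.path.dirname() stops
# making progress at the filesystem root.
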
def bailifchanged(repo):
    if repo.dirstate.p2() != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise util.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for s in sorted(ctx.substate):
        if ctx.sub(s).dirty():
            raise util.Abort(_("uncommitted changes in subrepo %s") % s)

def logmessage(ui, opts):
    """get the log message according to the -m and -l options"""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError, inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))
    return message

def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if limit:
        try:
            limit = int(limit)
        except ValueError:
            raise util.Abort(_('limit must be a positive integer'))
        if limit <= 0:
            raise util.Abort(_('limit must be positive'))
    else:
        limit = None
    return limit

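# Illustrative values: opts={'limit': '3'} returns 3, an absent or empty
# limit returns None, and opts={'limit': '0'} or a non-numeric string raises
# util.Abort.
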
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        'm': lambda: re.sub('[^\w]', '_', str(desc))
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update(node_expander)
        if node:
            expander['r'] = (lambda:
                str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError, inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])

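# Illustrative expansion (hypothetical hash): with the default export
# template 'hg-%h.patch', '%h' expands via short(node) to something like
# 'hg-1a2b3c4d5e6f.patch'; '%r' zero-pads the revision number to revwidth,
# and '%%' yields a literal '%'.
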
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):

    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = writable and repo.ui.fout or repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            return os.fdopen(os.dup(fp.fileno()), mode)
        else:
            # if this fp can't be duped properly, return
            # a dummy object that can be closed
            class wrappedfileobj(object):
                noop = lambda x: None
                def __init__(self, f):
                    self.f = f
                def __getattr__(self, attr):
                    if attr == 'close':
                        return self.noop
                    else:
                        return getattr(self.f, attr)

            return wrappedfileobj(fp)
    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)

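# Note on modemap above: the first write to a generated filename opens it
# with 'wb' and flips the map entry to 'ab', so later changesets whose
# template expands to the same name append to the file instead of
# truncating it.
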
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest '
                    'without a repository')
    if msg:
        raise util.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.changelog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise util.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r

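# Note on the fallback above: a bare revlog path given outside a repository
# has its two-character extension replaced with '.i' (file_[:-2] + ".i"), so
# either the '.i' index file or the '.d' data file of a revlog may be named
# on the command line.
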
def copy(ui, repo, pats, opts, rename=False):
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        srcs = []
        badstates = after and '?' or '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working copy\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

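    # Note on the samefile branch above: a case-only rename such as 'a' =>
    # 'A' looks like src == target on a case-insensitive filesystem, so the
    # file is moved through a '~hgrename' temporary instead of being copied
    # over itself.
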
    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest: ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res


    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0

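# Illustrative invocations (hypothetical names): 'hg copy a.txt b.txt' maps a
# single source to a single target, while 'hg copy *.py lib/' requires 'lib'
# to be an existing directory because multiple sources are expanded; with
# --after, targetpathafterfn() instead reconstructs the targets from what
# already exists on disk.
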
468 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
468 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
469 runargs=None, appendpid=False):
469 runargs=None, appendpid=False):
470 '''Run a command as a service.'''
470 '''Run a command as a service.'''
471
471
472 def writepid(pid):
472 def writepid(pid):
473 if opts['pid_file']:
473 if opts['pid_file']:
474 mode = appendpid and 'a' or 'w'
474 mode = appendpid and 'a' or 'w'
475 fp = open(opts['pid_file'], mode)
475 fp = open(opts['pid_file'], mode)
476 fp.write(str(pid) + '\n')
476 fp.write(str(pid) + '\n')
477 fp.close()
477 fp.close()
478
478
479 if opts['daemon'] and not opts['daemon_pipefds']:
479 if opts['daemon'] and not opts['daemon_pipefds']:
480 # Signal child process startup with file removal
480 # Signal child process startup with file removal
481 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
481 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
482 os.close(lockfd)
482 os.close(lockfd)
483 try:
483 try:
484 if not runargs:
484 if not runargs:
485 runargs = util.hgcmd() + sys.argv[1:]
485 runargs = util.hgcmd() + sys.argv[1:]
486 runargs.append('--daemon-pipefds=%s' % lockpath)
486 runargs.append('--daemon-pipefds=%s' % lockpath)
487 # Don't pass --cwd to the child process, because we've already
487 # Don't pass --cwd to the child process, because we've already
488 # changed directory.
488 # changed directory.
489 for i in xrange(1, len(runargs)):
489 for i in xrange(1, len(runargs)):
490 if runargs[i].startswith('--cwd='):
490 if runargs[i].startswith('--cwd='):
491 del runargs[i]
491 del runargs[i]
492 break
492 break
493 elif runargs[i].startswith('--cwd'):
493 elif runargs[i].startswith('--cwd'):
494 del runargs[i:i + 2]
494 del runargs[i:i + 2]
495 break
495 break
496 def condfn():
496 def condfn():
497 return not os.path.exists(lockpath)
497 return not os.path.exists(lockpath)
498 pid = util.rundetached(runargs, condfn)
498 pid = util.rundetached(runargs, condfn)
499 if pid < 0:
499 if pid < 0:
500 raise util.Abort(_('child process failed to start'))
500 raise util.Abort(_('child process failed to start'))
501 writepid(pid)
501 writepid(pid)
502 finally:
502 finally:
503 try:
503 try:
504 os.unlink(lockpath)
504 os.unlink(lockpath)
505 except OSError, e:
505 except OSError, e:
506 if e.errno != errno.ENOENT:
506 if e.errno != errno.ENOENT:
507 raise
507 raise
508 if parentfn:
508 if parentfn:
509 return parentfn(pid)
509 return parentfn(pid)
510 else:
510 else:
511 return
511 return
512
512
513 if initfn:
513 if initfn:
514 initfn()
514 initfn()
515
515
516 if not opts['daemon']:
516 if not opts['daemon']:
517 writepid(os.getpid())
517 writepid(os.getpid())
518
518
519 if opts['daemon_pipefds']:
519 if opts['daemon_pipefds']:
520 lockpath = opts['daemon_pipefds']
520 lockpath = opts['daemon_pipefds']
521 try:
521 try:
522 os.setsid()
522 os.setsid()
523 except AttributeError:
523 except AttributeError:
524 pass
524 pass
525 os.unlink(lockpath)
525 os.unlink(lockpath)
526 util.hidewindow()
526 util.hidewindow()
527 sys.stdout.flush()
527 sys.stdout.flush()
528 sys.stderr.flush()
528 sys.stderr.flush()
529
529
530 nullfd = os.open(os.devnull, os.O_RDWR)
530 nullfd = os.open(os.devnull, os.O_RDWR)
531 logfilefd = nullfd
531 logfilefd = nullfd
532 if logfile:
532 if logfile:
533 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
533 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
534 os.dup2(nullfd, 0)
534 os.dup2(nullfd, 0)
535 os.dup2(logfilefd, 1)
535 os.dup2(logfilefd, 1)
536 os.dup2(logfilefd, 2)
536 os.dup2(logfilefd, 2)
537 if nullfd not in (0, 1, 2):
537 if nullfd not in (0, 1, 2):
538 os.close(nullfd)
538 os.close(nullfd)
539 if logfile and logfilefd not in (0, 1, 2):
539 if logfile and logfilefd not in (0, 1, 2):
540 os.close(logfilefd)
540 os.close(logfilefd)
541
541
542 if runfn:
542 if runfn:
543 return runfn()
543 return runfn()
544
544
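# Illustrative usage sketch, not part of the original module (the 'server'
# object below is hypothetical): a long-running command is typically wired
# up as
#
#   def init():
#       server.bind()      # completes before the parent is released
#   def run():
#       server.serve_forever()
#   service(opts, initfn=init, runfn=run, logfile=opts.get('logfile'))
#
# With --daemon, the parent re-executes hg detached and waits (via condfn)
# for the child to unlink the lock file; the child runs initfn, unlinks the
# lock, redirects stdio to logfile/devnull and then calls runfn.
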
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.'''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])
    filemode = {}

    def single(rev, seqno, fp):
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()
        prev = (parents and parents[0]) or nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] # Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            if fp != template:
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent  %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent  %s\n" % hex(parents[1]))
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)

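# Illustrative sketch, not part of the original module: callers usually
# pass a filename template where '%h' expands to the short hash and '%r'
# to the zero-padded revision number, so
#
#   export(repo, [7, 8], template='out-%r.patch')
#
# would write one patch per revision (out-7.patch and out-8.patch here),
# while an empty template with fp=None falls back to writing on the ui.
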
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   listsubrepos=False):
    '''show diff or diffstat.'''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if stat:
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)

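# Illustrative sketch, not part of the original module: this is the shared
# back end for plain and --stat output, e.g.
#
#   diffordiffstat(ui, repo, patch.diffopts(ui), repo['.'].node(), None,
#                  scmutil.matchall(repo), stat=True)
#
# would print a labeled diffstat of the working directory against '.';
# the same call with stat=False prints the full diff instead.
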
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.patch = patch
        self.diffopts = diffopts
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)),
                          label='log.node')
            return

        log = self.repo.changelog
        date = util.datestr(ctx.date())

        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset:   %d:%s\n") % (rev, hexfunc(changenode)),
                      label='log.changeset changeset.%s' % ctx.phasestr())

        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch:      %s\n") % branch,
                          label='log.branch')
        for bookmark in self.repo.nodebookmarks(changenode):
            # i18n: column positioning for "hg log"
            self.ui.write(_("bookmark:    %s\n") % bookmark,
                          label='log.bookmark')
        for tag in self.repo.nodetags(changenode):
            # i18n: column positioning for "hg log"
            self.ui.write(_("tag:         %s\n") % tag,
                          label='log.tag')
        if self.ui.debugflag and ctx.phase():
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase:       %s\n") % _(ctx.phasestr()),
                          label='log.phase')
        for parent in parents:
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent:      %d:%s\n") % parent,
                          label='log.parent changeset.%s' % ctx.phasestr())

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest:    %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user:        %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date:        %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files:       %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies:      %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra:       %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary:     %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        if not matchfn:
            matchfn = self.patch
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                parents = []
            else:
                parents = [parents[0]]
        return parents

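# Illustrative sketch, not part of the original module: a printer is
# normally obtained from show_changeset() below and driven through the
# show/flush/close protocol ('ctxs' is a hypothetical changectx iterable):
#
#   displayer = show_changeset(ui, repo, opts, buffered=True)
#   for ctx in ctxs:
#       displayer.show(ctx)
#       displayer.flush(ctx.rev())
#   displayer.close()
#
# With buffered=True, show() captures the labeled output per revision in
# self.hunk and flush() decides when it actually reaches the ui.
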
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        self.cache = {}

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            return []
        return parents

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it into a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        types = {'header': '', 'footer': '', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                              **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))

def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    patch = None
    if opts.get('patch') or opts.get('stat'):
        patch = scmutil.matchall(repo)

    tmpl = opts.get('template')
    style = None
    if tmpl:
        tmpl = templater.parsestring(tmpl, quoted=False)
    else:
        style = opts.get('style')

    # ui settings
    if not (tmpl or style):
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            try:
                tmpl = templater.parsestring(tmpl)
            except SyntaxError:
                tmpl = templater.parsestring(tmpl, quoted=False)
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not (tmpl or style):
        return changeset_printer(ui, repo, patch, opts, buffered)

    mapfile = None
    if style and not tmpl:
        mapfile = style
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname

    try:
        t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])
    if tmpl:
        t.use_template(tmpl)
    return t

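# Illustrative sketch, not part of the original module: given the
# resolution order documented above, an hgrc containing
#
#   [ui]
#   logtemplate = {rev} {desc|firstline}\n
#
# makes show_changeset(ui, repo, {}) return a changeset_templater for that
# template, while passing opts={'style': 'compact'} takes precedence and
# loads the bundled map-cmdline.compact map file instead.
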
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    df = util.matchdate(date)
    m = scmutil.matchall(repo)
    results = {}

    def prep(ctx, fns):
        d = ctx.date()
        if df(d[0]):
            results[ctx.rev()] = d

    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in results:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(results[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))

def increasingwindows(start, end, windowsize=8, sizelimit=512):
    if start < end:
        while start < end:
            yield start, min(windowsize, end - start)
            start += windowsize
            if windowsize < sizelimit:
                windowsize *= 2
    else:
        while start > end:
            yield start, min(windowsize, start - end - 1)
            start -= windowsize
            if windowsize < sizelimit:
                windowsize *= 2

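# Illustrative sketch, not part of the original module: each yielded pair
# is (window start, window size), with sizes doubling up to sizelimit so
# that long ranges are covered in few windows, e.g.
#
#   >>> list(increasingwindows(0, 100))
#   [(0, 8), (8, 16), (24, 32), (56, 44)]
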
class FileWalkError(Exception):
    pass

def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within the minrev/maxrev linkrev range.
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order.
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield revs for which we have the changelog; this can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted

def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')

    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts.get('rev'))
    elif follow:
        revs = repo.revs('reverse(:.)')
    else:
        revs = list(repo)
        revs.reverse()
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if not slowpath and not match.files():
        # No files, no patterns. Display all revs.
        wanted = set(revs)

    if not slowpath and match.files():
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif value not in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    class followfilter(object):
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = set()
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.add(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.add(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.update(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.update(realparents(rev))
                    return True

            return False

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted.discard(x)

    # Choose a small initial window if we will probably only visit a
    # few commits.
    limit = loglimit(opts)
    windowsize = 8
    if limit:
        windowsize = min(limit, windowsize)

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = followfilter(onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        for i, window in increasingwindows(0, len(revs), windowsize):
            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)
    return iterate()

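# Illustrative sketch, not part of the original module: finddate() above
# shows the canonical caller shape; a grep-like consumer would gather data
# in prepare() (called in forward order within each window) and render when
# the context is yielded ('matched' and 'display' are hypothetical):
#
#   matched = {}
#   def prep(ctx, fns):
#       matched[ctx.rev()] = [f for f in fns]
#   for ctx in walkchangerevs(repo, match, {'rev': None}, prep):
#       display(ctx, matched[ctx.rev()])
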
def _makegraphfilematcher(repo, pats, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    fcacheready = [False]
    pctx = repo['.']
    wctx = repo[None]

    def populate():
        for fn in pats:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.match(wctx, fcache.get(rev, []), default='path')

    return filematcher

1319 def _makegraphlogrevset(repo, pats, opts, revs):
1319 def _makegraphlogrevset(repo, pats, opts, revs):
1320 """Return (expr, filematcher) where expr is a revset string built
1320 """Return (expr, filematcher) where expr is a revset string built
1321 from log options and file patterns or None. If --stat or --patch
1321 from log options and file patterns or None. If --stat or --patch
1322 are not passed filematcher is None. Otherwise it is a callable
1322 are not passed filematcher is None. Otherwise it is a callable
1323 taking a revision number and returning a match objects filtering
1323 taking a revision number and returning a match objects filtering
1324 the files to be detailed when displaying the revision.
1324 the files to be detailed when displaying the revision.
1325 """
1325 """
1326 opt2revset = {
1326 opt2revset = {
1327 'no_merges': ('not merge()', None),
1327 'no_merges': ('not merge()', None),
1328 'only_merges': ('merge()', None),
1328 'only_merges': ('merge()', None),
1329 '_ancestors': ('ancestors(%(val)s)', None),
1329 '_ancestors': ('ancestors(%(val)s)', None),
1330 '_fancestors': ('_firstancestors(%(val)s)', None),
1330 '_fancestors': ('_firstancestors(%(val)s)', None),
1331 '_descendants': ('descendants(%(val)s)', None),
1331 '_descendants': ('descendants(%(val)s)', None),
1332 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1332 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1333 '_matchfiles': ('_matchfiles(%(val)s)', None),
1333 '_matchfiles': ('_matchfiles(%(val)s)', None),
1334 'date': ('date(%(val)r)', None),
1334 'date': ('date(%(val)r)', None),
1335 'branch': ('branch(%(val)r)', ' or '),
1335 'branch': ('branch(%(val)r)', ' or '),
1336 '_patslog': ('filelog(%(val)r)', ' or '),
1336 '_patslog': ('filelog(%(val)r)', ' or '),
1337 '_patsfollow': ('follow(%(val)r)', ' or '),
1337 '_patsfollow': ('follow(%(val)r)', ' or '),
1338 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1338 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1339 'keyword': ('keyword(%(val)r)', ' or '),
1339 'keyword': ('keyword(%(val)r)', ' or '),
1340 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1340 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1341 'user': ('user(%(val)r)', ' or '),
1341 'user': ('user(%(val)r)', ' or '),
1342 }
1342 }
1343
1343
1344 opts = dict(opts)
1344 opts = dict(opts)
1345 # follow or not follow?
1345 # follow or not follow?
1346 follow = opts.get('follow') or opts.get('follow_first')
1346 follow = opts.get('follow') or opts.get('follow_first')
1347 followfirst = opts.get('follow_first') and 1 or 0
1347 followfirst = opts.get('follow_first') and 1 or 0
1348 # --follow with FILE behaviour depends on revs...
1348 # --follow with FILE behaviour depends on revs...
1349 startrev = revs[0]
1349 startrev = revs[0]
1350 followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0
1350 followdescendants = (len(revs) > 1 and revs[0] < revs[1]) and 1 or 0
1351
1351
1352 # branch and only_branch are really aliases and must be handled at
1352 # branch and only_branch are really aliases and must be handled at
1353 # the same time
1353 # the same time
1354 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1354 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1355 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1355 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1356 # pats/include/exclude are passed to match.match() directly in
1356 # pats/include/exclude are passed to match.match() directly in
1357 # _matchfiles() revset but walkchangerevs() builds its matcher with
1357 # _matchfiles() revset but walkchangerevs() builds its matcher with
1358 # scmutil.match(). The difference is input pats are globbed on
1358 # scmutil.match(). The difference is input pats are globbed on
1359 # platforms without shell expansion (windows).
1359 # platforms without shell expansion (windows).
1360 pctx = repo[None]
1360 pctx = repo[None]
1361 match, pats = scmutil.matchandpats(pctx, pats, opts)
1361 match, pats = scmutil.matchandpats(pctx, pats, opts)
1362 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1362 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1363 if not slowpath:
1363 if not slowpath:
1364 for f in match.files():
1364 for f in match.files():
1365 if follow and f not in pctx:
1365 if follow and f not in pctx:
1366 raise util.Abort(_('cannot follow file not in parent '
1366 raise util.Abort(_('cannot follow file not in parent '
1367 'revision: "%s"') % f)
1367 'revision: "%s"') % f)
1368 filelog = repo.file(f)
1368 filelog = repo.file(f)
1369 if not filelog:
1369 if not filelog:
1370 # A zero count may be a directory or deleted file, so
1370 # A zero count may be a directory or deleted file, so
1371 # try to find matching entries on the slow path.
1371 # try to find matching entries on the slow path.
1372 if follow:
1372 if follow:
1373 raise util.Abort(
1373 raise util.Abort(
1374 _('cannot follow nonexistent file: "%s"') % f)
1374 _('cannot follow nonexistent file: "%s"') % f)
1375 slowpath = True
1375 slowpath = True
1376
1376
1377 # We decided to fall back to the slowpath because at least one
1377 # We decided to fall back to the slowpath because at least one
1378 # of the paths was not a file. Check to see if at least one of them
1378 # of the paths was not a file. Check to see if at least one of them
1379 # existed in history - in that case, we'll continue down the
1379 # existed in history - in that case, we'll continue down the
1380 # slowpath; otherwise, we can turn off the slowpath
1380 # slowpath; otherwise, we can turn off the slowpath
1381 if slowpath:
1381 if slowpath:
1382 for path in match.files():
1382 for path in match.files():
1383 if path == '.' or path in repo.store:
1383 if path == '.' or path in repo.store:
1384 break
1384 break
1385 else:
1385 else:
1386 slowpath = False
1386 slowpath = False
1387
1387
1388 if slowpath:
1388 if slowpath:
1389 # See walkchangerevs() slow path.
1389 # See walkchangerevs() slow path.
1390 #
1390 #
1391 if follow:
1391 if follow:
1392 raise util.Abort(_('can only follow copies/renames for explicit '
1392 raise util.Abort(_('can only follow copies/renames for explicit '
1393 'filenames'))
1393 'filenames'))
1394 # pats/include/exclude cannot be represented as separate
1394 # pats/include/exclude cannot be represented as separate
1395 # revset expressions as their filtering logic applies at file
1395 # revset expressions as their filtering logic applies at file
1396 # level. For instance "-I a -X a" matches a revision touching
1396 # level. For instance "-I a -X a" matches a revision touching
1397 # "a" and "b" while "file(a) and not file(b)" does
1397 # "a" and "b" while "file(a) and not file(b)" does
1398 # not. Besides, filesets are evaluated against the working
1398 # not. Besides, filesets are evaluated against the working
1399 # directory.
1399 # directory.
1400 matchargs = ['r:', 'd:relpath']
1401 for p in pats:
1402 matchargs.append('p:' + p)
1403 for p in opts.get('include', []):
1404 matchargs.append('i:' + p)
1405 for p in opts.get('exclude', []):
1406 matchargs.append('x:' + p)
1407 matchargs = ','.join(('%r' % p) for p in matchargs)
1408 opts['_matchfiles'] = matchargs
1409 else:
1410 if follow:
1411 fpats = ('_patsfollow', '_patsfollowfirst')
1412 fnopats = (('_ancestors', '_fancestors'),
1413 ('_descendants', '_fdescendants'))
1414 if pats:
1415 # follow() revset interprets its file argument as a
1416 # manifest entry, so use match.files(), not pats.
1417 opts[fpats[followfirst]] = list(match.files())
1418 else:
1419 opts[fnopats[followdescendants][followfirst]] = str(startrev)
1420 else:
1421 opts['_patslog'] = list(pats)
1422
1423 filematcher = None
1424 if opts.get('patch') or opts.get('stat'):
1425 if follow:
1426 filematcher = _makegraphfilematcher(repo, pats, followfirst)
1427 else:
1428 filematcher = lambda rev: match
1429
1430 expr = []
1431 for op, val in opts.iteritems():
1432 if not val:
1433 continue
1434 if op not in opt2revset:
1435 continue
1436 revop, andor = opt2revset[op]
1437 if '%(val)' not in revop:
1438 expr.append(revop)
1439 else:
1440 if not isinstance(val, list):
1441 e = revop % {'val': val}
1442 else:
1443 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
1444 expr.append(e)
1445
1446 if expr:
1447 expr = '(' + ' and '.join(expr) + ')'
1448 else:
1449 expr = None
1450 return expr, filematcher
1451
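The option-to-revset loop above is the heart of _makegraphlogrevset: each log option is looked up in the opt2revset table and contributes either a fixed revset fragment or one interpolated with its value(s), and the fragments are and-ed together. A minimal standalone sketch of that loop follows; the table here is an illustrative subset, not the real opt2revset mapping.

# Hedged sketch of the expression-building loop above. The table is a
# hypothetical subset of opt2revset; option values may be scalars or lists.
opt2revset = {
    'user': ('user(%(val)r)', ' or '),
    'keyword': ('keyword(%(val)r)', ' or '),
    'no_merges': ('not merge()', None),
}

def buildexpr(opts):
    expr = []
    for op, val in opts.items():
        if not val or op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)          # fixed fragment, takes no value
        elif not isinstance(val, list):
            expr.append(revop % {'val': val})
        else:                           # one fragment per value, joined by andor
            expr.append('(' + andor.join(revop % {'val': v} for v in val) + ')')
    return '(' + ' and '.join(expr) + ')' if expr else None

print(buildexpr({'user': ['alice', 'bob'], 'no_merges': True}))
# -> ((user('alice') or user('bob')) and not merge())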
1452 def getgraphlogrevs(repo, pats, opts):
1453 """Return (revs, expr, filematcher) where revs is an iterable of
1454 revision numbers, expr is a revset string built from log options
1455 and file patterns or None, and used to filter 'revs'. If --stat or
1456 --patch are not passed filematcher is None. Otherwise it is a
1457 callable taking a revision number and returning a match object
1458 filtering the files to be detailed when displaying the revision.
1459 """
1460 if not len(repo):
1461 return [], None, None
1462 limit = loglimit(opts)
1463 # Default --rev value depends on --follow but --follow behaviour
1464 # depends on revisions resolved from --rev...
1465 follow = opts.get('follow') or opts.get('follow_first')
1466 possiblyunsorted = False # whether revs might need sorting
1467 if opts.get('rev'):
1468 revs = scmutil.revrange(repo, opts['rev'])
1469 # Don't sort here because _makegraphlogrevset might depend on the
1470 # order of revs
1471 possiblyunsorted = True
1472 else:
1473 if follow and len(repo) > 0:
1474 revs = repo.revs('reverse(:.)')
1475 else:
1476 revs = list(repo.changelog)
1477 revs.reverse()
1478 if not revs:
1479 return [], None, None
1480 expr, filematcher = _makegraphlogrevset(repo, pats, opts, revs)
1481 if possiblyunsorted:
1482 revs.sort(reverse=True)
1483 if expr:
1484 # Revset matchers often operate faster on revisions in changelog
1485 # order, because most filters deal with the changelog.
1486 revs.reverse()
1487 matcher = revset.match(repo.ui, expr)
1488 # Revset matches can reorder revisions. "A or B" typically returns
1489 # the revision matching A then the revision matching B. Sort
1490 # again to fix that.
1491 revs = matcher(repo, revs)
1492 revs.sort(reverse=True)
1493 if limit is not None:
1494 revs = revs[:limit]
1495
1496 return revs, expr, filematcher
1497
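The sorting in getgraphlogrevs is easy to misread: revisions are handed to the matcher in changelog (oldest-first) order for speed, the matcher may reorder them, so the result is re-sorted newest-first and only then truncated to --limit. In miniature, with plain integers standing in for revision numbers:

# Hedged sketch of the reorder/sort/limit dance above; the list filter
# stands in for matcher(repo, revs), which may return any order.
revs = [9, 5, 3, 1]                    # newest-first, as built above
revs.reverse()                         # changelog order for the matcher
matched = [r for r in revs if r != 5]  # stand-in for the revset matcher
matched.sort(reverse=True)             # restore newest-first
limit = 2
print(matched[:limit])                 # [9, 3]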
1498 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
1499 filematcher=None):
1500 seen, state = [], graphmod.asciistate()
1501 for rev, type, ctx, parents in dag:
1502 char = 'o'
1503 if ctx.node() in showparents:
1504 char = '@'
1505 elif ctx.obsolete():
1506 char = 'x'
1507 copies = None
1508 if getrenamed and ctx.rev():
1509 copies = []
1510 for fn in ctx.files():
1511 rename = getrenamed(fn, ctx.rev())
1512 if rename:
1513 copies.append((fn, rename[0]))
1514 revmatchfn = None
1515 if filematcher is not None:
1516 revmatchfn = filematcher(ctx.rev())
1517 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
1518 lines = displayer.hunk.pop(rev).split('\n')
1519 if not lines[-1]:
1520 del lines[-1]
1521 displayer.flush(rev)
1522 edges = edgefn(type, char, lines, seen, rev, parents)
1523 for type, char, lines, coldata in edges:
1524 graphmod.ascii(ui, state, type, char, lines, coldata)
1525 displayer.close()
1526
1527 def graphlog(ui, repo, *pats, **opts):
1528 # Parameters are identical to log command ones
1529 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
1530 revdag = graphmod.dagwalker(repo, revs)
1531
1532 getrenamed = None
1533 if opts.get('copies'):
1534 endrev = None
1535 if opts.get('rev'):
1536 endrev = max(scmutil.revrange(repo, opts.get('rev'))) + 1
1537 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
1538 displayer = show_changeset(ui, repo, opts, buffered=True)
1539 showparents = [ctx.node() for ctx in repo[None].parents()]
1540 displaygraph(ui, revdag, displayer, showparents,
1541 graphmod.asciiedges, getrenamed, filematcher)
1542
1543 def checkunsupportedgraphflags(pats, opts):
1544 for op in ["newest_first"]:
1545 if op in opts and opts[op]:
1546 raise util.Abort(_("-G/--graph option is incompatible with --%s")
1547 % op.replace("_", "-"))
1548
1549 def graphrevs(repo, nodes, opts):
1550 limit = loglimit(opts)
1551 nodes.reverse()
1552 if limit is not None:
1553 nodes = nodes[:limit]
1554 return graphmod.nodes(repo, nodes)
1555
1556 def add(ui, repo, match, dryrun, listsubrepos, prefix, explicitonly):
1557 join = lambda f: os.path.join(prefix, f)
1558 bad = []
1559 oldbad = match.bad
1560 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1561 names = []
1562 wctx = repo[None]
1563 cca = None
1564 abort, warn = scmutil.checkportabilityalert(ui)
1565 if abort or warn:
1566 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
1567 for f in repo.walk(match):
1568 exact = match.exact(f)
1569 if exact or not explicitonly and f not in repo.dirstate:
1570 if cca:
1571 cca(f)
1572 names.append(f)
1573 if ui.verbose or not exact:
1574 ui.status(_('adding %s\n') % match.rel(join(f)))
1575
1576 for subpath in sorted(wctx.substate):
1577 sub = wctx.sub(subpath)
1578 try:
1579 submatch = matchmod.narrowmatcher(subpath, match)
1580 if listsubrepos:
1581 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1582 False))
1583 else:
1584 bad.extend(sub.add(ui, submatch, dryrun, listsubrepos, prefix,
1585 True))
1586 except error.LookupError:
1587 ui.status(_("skipping missing subrepository: %s\n")
1588 % join(subpath))
1589
1590 if not dryrun:
1591 rejected = wctx.add(names, prefix)
1592 bad.extend(f for f in rejected if f in match.files())
1593 return bad
1594
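Both add() above and forget() below replace match.bad with `lambda x, y: bad.append(x) or oldbad(x, y)`. The trick is that list.append() returns None, so the `or` always falls through to the original callback while the bad filename is recorded on the side. A standalone demonstration:

# Demonstration of the match.bad wrapping idiom used above.
bad = []

def oldbad(f, msg):
    print('%s: %s' % (f, msg))

newbad = lambda x, y: bad.append(x) or oldbad(x, y)
newbad('foo.txt', 'no such file')   # prints "foo.txt: no such file"
print(bad)                          # ['foo.txt'] - recorded as well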
1595 def forget(ui, repo, match, prefix, explicitonly):
1596 join = lambda f: os.path.join(prefix, f)
1597 bad = []
1598 oldbad = match.bad
1599 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
1600 wctx = repo[None]
1601 forgot = []
1602 s = repo.status(match=match, clean=True)
1603 forget = sorted(s[0] + s[1] + s[3] + s[6])
1604 if explicitonly:
1605 forget = [f for f in forget if match.exact(f)]
1606
1607 for subpath in sorted(wctx.substate):
1608 sub = wctx.sub(subpath)
1609 try:
1610 submatch = matchmod.narrowmatcher(subpath, match)
1611 subbad, subforgot = sub.forget(ui, submatch, prefix)
1612 bad.extend([subpath + '/' + f for f in subbad])
1613 forgot.extend([subpath + '/' + f for f in subforgot])
1614 except error.LookupError:
1615 ui.status(_("skipping missing subrepository: %s\n")
1616 % join(subpath))
1617
1618 if not explicitonly:
1619 for f in match.files():
1620 if f not in repo.dirstate and not os.path.isdir(match.rel(join(f))):
1621 if f not in forgot:
1622 if os.path.exists(match.rel(join(f))):
1623 ui.warn(_('not removing %s: '
1624 'file is already untracked\n')
1625 % match.rel(join(f)))
1626 bad.append(f)
1627
1628 for f in forget:
1629 if ui.verbose or not match.exact(f):
1630 ui.status(_('removing %s\n') % match.rel(join(f)))
1631
1632 rejected = wctx.forget(forget, prefix)
1633 bad.extend(f for f in rejected if f in match.files())
1634 forgot.extend(forget)
1635 return bad, forgot
1636
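The index juggling in forget() -- `s[0] + s[1] + s[3] + s[6]` -- relies on repo.status() returning a 7-tuple in a fixed order: (modified, added, removed, deleted, unknown, ignored, clean). The selected indices therefore cover every tracked file the match touches: modified, added, deleted and clean, but not removed, unknown or ignored. Spelled out:

# The 7-tuple order assumed by the s[0] + s[1] + s[3] + s[6] expression
# above; toy single-element lists stand in for the real file lists.
s = (['mod'], ['add'], ['rem'], ['del'], ['unk'], ['ign'], ['cln'])
modified, added, removed, deleted, unknown, ignored, clean = s
print(sorted(s[0] + s[1] + s[3] + s[6]))   # ['add', 'cln', 'del', 'mod']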
1637 def duplicatecopies(repo, rev, fromrev):
1638 '''reproduce copies from fromrev to rev in the dirstate'''
1639 for dst, src in copies.pathcopies(repo[fromrev], repo[rev]).iteritems():
1640 # copies.pathcopies returns backward renames, so dst might not
1641 # actually be in the dirstate
1642 if repo.dirstate[dst] in "nma":
1643 repo.dirstate.copy(src, dst)
1644
1645 def commit(ui, repo, commitfunc, pats, opts):
1646 '''commit the specified files or all outstanding changes'''
1647 date = opts.get('date')
1648 if date:
1649 opts['date'] = util.parsedate(date)
1650 message = logmessage(ui, opts)
1651
1652 # extract addremove carefully -- this function can be called from a command
1653 # that doesn't support addremove
1654 if opts.get('addremove'):
1655 scmutil.addremove(repo, pats, opts)
1656
1657 return commitfunc(ui, repo, message,
1658 scmutil.match(repo[None], pats, opts), opts)
1659
1660 def amend(ui, repo, commitfunc, old, extra, pats, opts):
1661 ui.note(_('amending changeset %s\n') % old)
1662 base = old.p1()
1663
1664 wlock = lock = newid = None
1665 try:
1666 wlock = repo.wlock()
1667 lock = repo.lock()
1668 tr = repo.transaction('amend')
1669 try:
1670 # See if we got a message from -m or -l, if not, open the editor
1671 # with the message of the changeset to amend
1672 message = logmessage(ui, opts)
1673 # ensure logfile does not conflict with later enforcement of the
1674 # message. potential logfile content has been processed by
1675 # `logmessage` anyway.
1676 opts.pop('logfile')
1677 # First, do a regular commit to record all changes in the working
1678 # directory (if there are any)
1679 ui.callhooks = False
1680 currentbookmark = repo._bookmarkcurrent
1681 try:
1682 repo._bookmarkcurrent = None
1683 opts['message'] = 'temporary amend commit for %s' % old
1684 node = commit(ui, repo, commitfunc, pats, opts)
1685 finally:
1686 repo._bookmarkcurrent = currentbookmark
1687 ui.callhooks = True
1688 ctx = repo[node]
1689
1690 # Participating changesets:
1691 #
1692 # node/ctx o - new (intermediate) commit that contains changes
1693 # | from working dir to go into amending commit
1694 # | (or a workingctx if there were no changes)
1695 # |
1696 # old o - changeset to amend
1697 # |
1698 # base o - parent of amending changeset
1699
1700 # Update extra dict from amended commit (e.g. to preserve graft
1701 # source)
1702 extra.update(old.extra())
1703
1704 # Also update it from the intermediate commit or from the wctx
1705 extra.update(ctx.extra())
1706
1707 if len(old.parents()) > 1:
1708 # ctx.files() isn't reliable for merges, so fall back to the
1709 # slower repo.status() method
1710 files = set([fn for st in repo.status(base, old)[:3]
1711 for fn in st])
1712 else:
1713 files = set(old.files())
1714
1715 # Second, we use either the commit we just did, or, if there were no
1716 # changes, the parent of the working directory as the version of the
1717 # files in the final amend commit
1718 if node:
1719 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
1720
1721 user = ctx.user()
1722 date = ctx.date()
1723 # Recompute copies (avoid recording a -> b -> a)
1724 copied = copies.pathcopies(base, ctx)
1725
1726 # Prune files which were reverted by the updates: if old
1727 # introduced file X and our intermediate commit, node,
1728 # renamed that file, then those two files are the same and
1729 # we can discard X from our list of files. Likewise if X
1730 # was deleted, it's no longer relevant
1731 files.update(ctx.files())
1732
1733 def samefile(f):
1734 if f in ctx.manifest():
1735 a = ctx.filectx(f)
1736 if f in base.manifest():
1737 b = base.filectx(f)
1738 return (not a.cmp(b)
1739 and a.flags() == b.flags())
1740 else:
1741 return False
1742 else:
1743 return f not in base.manifest()
1744 files = [f for f in files if not samefile(f)]
1745
1746 def filectxfn(repo, ctx_, path):
1747 try:
1748 fctx = ctx[path]
1749 flags = fctx.flags()
1750 mctx = context.memfilectx(fctx.path(), fctx.data(),
1751 islink='l' in flags,
1752 isexec='x' in flags,
1753 copied=copied.get(path))
1754 return mctx
1755 except KeyError:
1756 raise IOError
1757 else:
1758 ui.note(_('copying changeset %s to %s\n') % (old, base))
1759
1760 # Use version of files as in the old cset
1761 def filectxfn(repo, ctx_, path):
1762 try:
1763 return old.filectx(path)
1764 except KeyError:
1765 raise IOError
1766
1767 user = opts.get('user') or old.user()
1768 date = opts.get('date') or old.date()
1769 editmsg = False
1770 if not message:
1771 editmsg = True
1772 message = old.description()
1773
1774 pureextra = extra.copy()
1775 extra['amend_source'] = old.hex()
1776
1777 new = context.memctx(repo,
1778 parents=[base.node(), old.p2().node()],
1779 text=message,
1780 files=files,
1781 filectxfn=filectxfn,
1782 user=user,
1783 date=date,
1784 extra=extra)
1785 if editmsg:
1786 new._text = commitforceeditor(repo, new, [])
1787
1788 newdesc = changelog.stripdesc(new.description())
1789 if ((not node)
1790 and newdesc == old.description()
1791 and user == old.user()
1792 and date == old.date()
1793 and pureextra == old.extra()):
1794 # nothing changed. continuing here would create a new node
1795 # anyway because of the amend_source noise.
1796 #
1797 # This is not what we expect from amend.
1798 return old.node()
1799
1800 ph = repo.ui.config('phases', 'new-commit', phases.draft)
1801 try:
1802 repo.ui.setconfig('phases', 'new-commit', old.phase())
1803 newid = repo.commitctx(new)
1804 finally:
1805 repo.ui.setconfig('phases', 'new-commit', ph)
1806 if newid != old.node():
1807 # Reroute the working copy parent to the new changeset
1808 repo.setparents(newid, nullid)
1809
1810 # Move bookmarks from old parent to amend commit
1811 bms = repo.nodebookmarks(old.node())
1812 if bms:
1813 marks = repo._bookmarks
1814 for bm in bms:
1815 marks[bm] = newid
1816 marks.write()
1817 # commit the whole amend process
1818 if obsolete._enabled and newid != old.node():
1819 # mark the new changeset as successor of the rewritten one
1820 new = repo[newid]
1821 obs = [(old, (new,))]
1822 if node:
1823 obs.append((ctx, ()))
1824
1825 obsolete.createmarkers(repo, obs)
1826 tr.close()
1827 finally:
1828 tr.release()
1829 if (not obsolete._enabled) and newid != old.node():
1830 # Strip the intermediate commit (if there was one) and the amended
1831 # commit
1832 if node:
1833 ui.note(_('stripping intermediate changeset %s\n') % ctx)
1834 ui.note(_('stripping amended changeset %s\n') % old)
1835 repair.strip(ui, repo, old.node(), topic='amend-backup')
1836 finally:
1837 if newid is None:
1838 repo.dirstate.invalidate()
1839 lockmod.release(lock, wlock)
1840 return newid
1841
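One subtle branch in amend() above is the early return of old.node(): if there was no intermediate commit and the stripped message, user, date and extra all match the old changeset, committing would only churn the hash via the amend_source marker. Distilled into a standalone predicate (a hypothetical helper, with a plain dict standing in for a changectx):

# Hedged sketch of amend()'s "nothing changed" test; `old` is a dict
# here, not a real changectx, and isnoopamend is not part of cmdutil.
def isnoopamend(node, newdesc, user, date, pureextra, old):
    return (not node
            and newdesc == old['desc']
            and user == old['user']
            and date == old['date']
            and pureextra == old['extra'])

old = {'desc': 'fix parser', 'user': 'alice', 'date': (0, 0), 'extra': {}}
print(isnoopamend(None, 'fix parser', 'alice', (0, 0), {}, old))      # True
print(isnoopamend('abc123', 'fix parser', 'alice', (0, 0), {}, old))  # False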
1842 def commiteditor(repo, ctx, subs):
1843 if ctx.description():
1844 return ctx.description()
1845 return commitforceeditor(repo, ctx, subs)
1846
1847 def commitforceeditor(repo, ctx, subs):
1848 edittext = []
1849 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
1850 if ctx.description():
1851 edittext.append(ctx.description())
1852 edittext.append("")
1853 edittext.append("") # Empty line between message and comments.
1854 edittext.append(_("HG: Enter commit message."
1855 " Lines beginning with 'HG:' are removed."))
1856 edittext.append(_("HG: Leave message empty to abort commit."))
1857 edittext.append("HG: --")
1858 edittext.append(_("HG: user: %s") % ctx.user())
1859 if ctx.p2():
1860 edittext.append(_("HG: branch merge"))
1861 if ctx.branch():
1862 edittext.append(_("HG: branch '%s'") % ctx.branch())
1863 if bookmarks.iscurrent(repo):
1864 edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
1865 edittext.extend([_("HG: subrepo %s") % s for s in subs])
1866 edittext.extend([_("HG: added %s") % f for f in added])
1867 edittext.extend([_("HG: changed %s") % f for f in modified])
1868 edittext.extend([_("HG: removed %s") % f for f in removed])
1869 if not added and not modified and not removed:
1870 edittext.append(_("HG: no files changed"))
1871 edittext.append("")
1872 # run editor in the repository root
1873 olddir = os.getcwd()
1874 os.chdir(repo.root)
1875 text = repo.ui.edit("\n".join(edittext), ctx.user())
1876 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
1877 os.chdir(olddir)
1878
1879 if not text.strip():
1880 raise util.Abort(_("empty commit message"))
1881
1882 return text
1883
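The only post-processing commitforceeditor applies to the edited text is the multiline regex above: every line beginning with 'HG:' is deleted together with its newline, then the remainder is checked for non-whitespace. The pattern in isolation, on sample text:

# The same "(?m)^HG:.*(\n|$)" substitution used above.
import re
raw = "fix parser\n\nHG: Enter commit message.\nHG: user: alice\n"
print(repr(re.sub("(?m)^HG:.*(\n|$)", "", raw)))   # 'fix parser\n\n'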
1884 def commitstatus(repo, node, branch, bheads=None, opts={}):
1885 ctx = repo[node]
1886 parents = ctx.parents()
1887
1888 if (not opts.get('amend') and bheads and node not in bheads and not
1889 [x for x in parents if x.node() in bheads and x.branch() == branch]):
1890 repo.ui.status(_('created new head\n'))
1891 # The message is not printed for initial roots. For the other
1892 # changesets, it is printed in the following situations:
1893 #
1894 # Par column: for the 2 parents with ...
1895 # N: null or no parent
1896 # B: parent is on another named branch
1897 # C: parent is a regular non-head changeset
1898 # H: parent was a branch head of the current branch
1899 # Msg column: whether we print "created new head" message
1900 # In the following, it is assumed that there already exists some
1901 # initial branch heads of the current branch, otherwise nothing is
1902 # printed anyway.
1903 #
1904 # Par Msg Comment
1905 # N N y additional topo root
1906 #
1907 # B N y additional branch root
1908 # C N y additional topo head
1909 # H N n usual case
1910 #
1911 # B B y weird additional branch root
1912 # C B y branch merge
1913 # H B n merge with named branch
1914 #
1915 # C C y additional head from merge
1916 # C H n merge with a head
1917 #
1918 # H H n head merge: head count decreases
1919
1920 if not opts.get('close_branch'):
1921 for r in parents:
1922 if r.closesbranch() and r.branch() == branch:
1923 repo.ui.status(_('reopening closed branch head %d\n') % r)
1924
1925 if repo.ui.debugflag:
1926 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
1927 elif repo.ui.verbose:
1928 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
1929
1930 def revert(ui, repo, ctx, parents, *pats, **opts):
1931 parent, p2 = parents
1932 node = ctx.node()
1933
1934 mf = ctx.manifest()
1935 if node == parent:
1936 pmf = mf
1937 else:
1938 pmf = None
1939
1940 # need all matching names in dirstate and manifest of target rev,
1941 # so have to walk both. do not print errors if files exist in one
1942 # but not the other.
1943
1944 names = {}
1945
1946 wlock = repo.wlock()
1947 try:
1948 # walk dirstate.
1949
1950 m = scmutil.match(repo[None], pats, opts)
1951 m.bad = lambda x, y: False
1952 for abs in repo.walk(m):
1953 names[abs] = m.rel(abs), m.exact(abs)
1954
1955 # walk target manifest.
1956
1957 def badfn(path, msg):
1958 if path in names:
1959 return
1960 if path in ctx.substate:
1961 return
1962 path_ = path + '/'
1963 for f in names:
1964 if f.startswith(path_):
1965 return
1966 ui.warn("%s: %s\n" % (m.rel(path), msg))
1967
1968 m = scmutil.match(ctx, pats, opts)
1969 m.bad = badfn
1970 for abs in ctx.walk(m):
1971 if abs not in names:
1972 names[abs] = m.rel(abs), m.exact(abs)
1973
1974 # get the list of subrepos that must be reverted
1975 targetsubs = sorted(s for s in ctx.substate if m(s))
1976 m = scmutil.matchfiles(repo, names)
1977 changes = repo.status(match=m)[:4]
1978 modified, added, removed, deleted = map(set, changes)
1979
1980 # if f is a rename, also revert the source
1981 cwd = repo.getcwd()
1982 for f in added:
1983 src = repo.dirstate.copied(f)
1984 if src and src not in names and repo.dirstate[src] == 'r':
1985 removed.add(src)
1986 names[src] = (repo.pathto(src, cwd), True)
1987
1988 def removeforget(abs):
1989 if repo.dirstate[abs] == 'a':
1990 return _('forgetting %s\n')
1991 return _('removing %s\n')
1992
1993 revert = ([], _('reverting %s\n'))
1994 add = ([], _('adding %s\n'))
1995 remove = ([], removeforget)
1996 undelete = ([], _('undeleting %s\n'))
1997
1998 disptable = (
1999 # dispatch table:
2000 # file state
2001 # action if in target manifest
2002 # action if not in target manifest
2003 # make backup if in target manifest
2004 # make backup if not in target manifest
2005 (modified, revert, remove, True, True),
2006 (added, revert, remove, True, False),
2007 (removed, undelete, None, True, False),
2008 (deleted, revert, remove, False, False),
2009 )
2010
2011 for abs, (rel, exact) in sorted(names.items()):
2012 mfentry = mf.get(abs)
2013 target = repo.wjoin(abs)
2014 def handle(xlist, dobackup):
2015 xlist[0].append(abs)
2016 if (dobackup and not opts.get('no_backup') and
2017 os.path.lexists(target) and
2018 abs in ctx and repo[None][abs].cmp(ctx[abs])):
2019 bakname = "%s.orig" % rel
2020 ui.note(_('saving current version of %s as %s\n') %
2021 (rel, bakname))
2022 if not opts.get('dry_run'):
2023 util.rename(target, bakname)
2024 if ui.verbose or not exact:
2025 msg = xlist[1]
2026 if not isinstance(msg, basestring):
2027 msg = msg(abs)
2028 ui.status(msg % rel)
2029 for table, hitlist, misslist, backuphit, backupmiss in disptable:
2030 if abs not in table:
2031 continue
2032 # file has changed in dirstate
2033 if mfentry:
2034 handle(hitlist, backuphit)
2035 elif misslist is not None:
2036 handle(misslist, backupmiss)
2037 break
2038 else:
2039 if abs not in repo.dirstate:
2040 if mfentry:
2041 handle(add, True)
2042 elif exact:
2043 ui.warn(_('file not managed: %s\n') % rel)
2044 continue
2045 # file has not changed in dirstate
2046 if node == parent:
2047 if exact:
2048 ui.warn(_('no changes needed to %s\n') % rel)
2049 continue
2050 if pmf is None:
2051 # only need parent manifest in this unlikely case,
2052 # so do not read by default
2053 pmf = repo[parent].manifest()
2054 if abs in pmf and mfentry:
2055 # if version of file is same in parent and target
2056 # manifests, do nothing
2057 if (pmf[abs] != mfentry or
2058 pmf.flags(abs) != mf.flags(abs)):
2059 handle(revert, False)
2060 else:
2061 handle(remove, False)
2062
2063 if not opts.get('dry_run'):
2064 def checkout(f):
2065 fc = ctx[f]
2066 repo.wwrite(f, fc.data(), fc.flags())
2067
2068 audit_path = scmutil.pathauditor(repo.root)
2068 audit_path = pathutil.pathauditor(repo.root)
2069 for f in remove[0]:
2070 if repo.dirstate[f] == 'a':
2071 repo.dirstate.drop(f)
2072 continue
2073 audit_path(f)
2074 try:
2075 util.unlinkpath(repo.wjoin(f))
2076 except OSError:
2077 pass
2078 repo.dirstate.remove(f)
2079
2080 normal = None
2081 if node == parent:
2082 # We're reverting to our parent. If possible, we'd like status
2083 # to report the file as clean. We have to use normallookup for
2084 # merges to avoid losing information about merged/dirty files.
2085 if p2 != nullid:
2086 normal = repo.dirstate.normallookup
2087 else:
2088 normal = repo.dirstate.normal
2089 for f in revert[0]:
2090 checkout(f)
2091 if normal:
2092 normal(f)
2093
2094 for f in add[0]:
2095 checkout(f)
2096 repo.dirstate.add(f)
2097
2098 normal = repo.dirstate.normallookup
2099 if node == parent and p2 == nullid:
2100 normal = repo.dirstate.normal
2101 for f in undelete[0]:
2102 checkout(f)
2103 normal(f)
2104
2105 copied = copies.pathcopies(repo[parent], ctx)
2106
2107 for f in add[0] + undelete[0] + revert[0]:
2108 if f in copied:
2109 repo.dirstate.copy(copied[f], f)
2110
2111 if targetsubs:
2112 # Revert the subrepos on the revert list
2113 for sub in targetsubs:
2114 ctx.sub(sub).revert(ui, ctx.substate[sub], *pats, **opts)
2115 finally:
2116 wlock.release()
2117
2118 def command(table):
2119 '''returns a function object bound to table which can be used as
2120 a decorator for populating table as a command table'''
2121
2122 def cmd(name, options=(), synopsis=None):
2123 def decorator(func):
2124 if synopsis:
2125 table[name] = func, list(options), synopsis
2126 else:
2127 table[name] = func, list(options)
2128 return func
2129 return decorator
2130
2131 return cmd
2132
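command() above returns a registration decorator closed over the given table; commands land in the table as (func, options) or (func, options, synopsis) tuples. A usage sketch against the function just defined (the command name and flags are made up for illustration):

# Usage sketch for the command() factory above; 'hello' is a made-up
# command, not part of Mercurial.
table = {}
cmd = command(table)

@cmd('hello', [('g', 'greeting', 'hi', 'greeting to use')],
     'hg hello NAME')
def hello(ui, repo, name, **opts):
    ui.write('%s %s\n' % (opts['greeting'], name))

# table is now {'hello': (hello, [('g', ...)], 'hg hello NAME')}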
2133 # a list of (ui, repo) functions called by commands.summary
2134 summaryhooks = util.hooks()
2135
2136 # A list of state files kept by multistep operations like graft.
2137 # Since graft cannot be aborted, it is considered 'clearable' by update.
2138 # note: bisect is intentionally excluded
2139 # (state file, clearable, allowcommit, error, hint)
2140 unfinishedstates = [
2141 ('graftstate', True, False, _('graft in progress'),
2142 _("use 'hg graft --continue' or 'hg update' to abort")),
2143 ('updatestate', True, False, _('last update was interrupted'),
2144 _("use 'hg update' to get a consistent checkout"))
2145 ]
2146
2147 def checkunfinished(repo, commit=False):
2148 '''Look for an unfinished multistep operation, like graft, and abort
2149 if found. It's probably good to check this right before
2150 bailifchanged().
2151 '''
2152 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2153 if commit and allowcommit:
2154 continue
2155 if repo.vfs.exists(f):
2156 raise util.Abort(msg, hint=hint)
2157
2158 def clearunfinished(repo):
2159 '''Check for unfinished operations (as above), and clear the ones
2160 that are clearable.
2161 '''
2162 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2163 if not clearable and repo.vfs.exists(f):
2164 raise util.Abort(msg, hint=hint)
2165 for f, clearable, allowcommit, msg, hint in unfinishedstates:
2166 if clearable and repo.vfs.exists(f):
2167 util.unlink(repo.join(f))
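The functional change in this file is the single hunk in revert() above: the path auditor is now obtained from the new pathutil module instead of scmutil. A hedged usage sketch of the audited call; the repository root and file paths are illustrative:

# Sketch of how revert() uses the auditor after this change.
import pathutil
audit_path = pathutil.pathauditor('/path/to/repo')
audit_path('some/file')             # raises util.Abort on a banned path
print(audit_path.check('../evil'))  # False - boolean variant swallows the abort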
@@ -1,852 +1,852 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 import errno
7 import errno
8
8
9 from node import nullid
9 from node import nullid
10 from i18n import _
10 from i18n import _
11 import scmutil, util, ignore, osutil, parsers, encoding
11 import scmutil, util, ignore, osutil, parsers, encoding, pathutil
12 import os, stat, errno, gc
12 import os, stat, errno, gc
13
13
14 propertycache = util.propertycache
14 propertycache = util.propertycache
15 filecache = scmutil.filecache
15 filecache = scmutil.filecache
16 _rangemask = 0x7fffffff
16 _rangemask = 0x7fffffff
17
17
18 class repocache(filecache):
18 class repocache(filecache):
19 """filecache for files in .hg/"""
19 """filecache for files in .hg/"""
20 def join(self, obj, fname):
20 def join(self, obj, fname):
21 return obj._opener.join(fname)
21 return obj._opener.join(fname)
22
22
23 class rootcache(filecache):
23 class rootcache(filecache):
24 """filecache for files in the repository root"""
24 """filecache for files in the repository root"""
25 def join(self, obj, fname):
25 def join(self, obj, fname):
26 return obj._join(fname)
26 return obj._join(fname)
27
27
28 class dirstate(object):
28 class dirstate(object):
29
29
30 def __init__(self, opener, ui, root, validate):
30 def __init__(self, opener, ui, root, validate):
31 '''Create a new dirstate object.
31 '''Create a new dirstate object.
32
32
33 opener is an open()-like callable that can be used to open the
33 opener is an open()-like callable that can be used to open the
34 dirstate file; root is the root of the directory tracked by
34 dirstate file; root is the root of the directory tracked by
35 the dirstate.
35 the dirstate.
36 '''
36 '''
37 self._opener = opener
37 self._opener = opener
38 self._validate = validate
38 self._validate = validate
39 self._root = root
39 self._root = root
40 self._rootdir = os.path.join(root, '')
40 self._rootdir = os.path.join(root, '')
41 self._dirty = False
41 self._dirty = False
42 self._dirtypl = False
42 self._dirtypl = False
43 self._lastnormaltime = 0
43 self._lastnormaltime = 0
44 self._ui = ui
44 self._ui = ui
45 self._filecache = {}
45 self._filecache = {}
46
46
47 @propertycache
47 @propertycache
48 def _map(self):
48 def _map(self):
49 '''Return the dirstate contents as a map from filename to
49 '''Return the dirstate contents as a map from filename to
50 (state, mode, size, time).'''
50 (state, mode, size, time).'''
51 self._read()
51 self._read()
52 return self._map
52 return self._map
53
53
54 @propertycache
54 @propertycache
55 def _copymap(self):
55 def _copymap(self):
56 self._read()
56 self._read()
57 return self._copymap
57 return self._copymap
58
58
59 @propertycache
59 @propertycache
60 def _foldmap(self):
60 def _foldmap(self):
61 f = {}
61 f = {}
62 for name, s in self._map.iteritems():
62 for name, s in self._map.iteritems():
63 if s[0] != 'r':
63 if s[0] != 'r':
64 f[util.normcase(name)] = name
64 f[util.normcase(name)] = name
65 for name in self._dirs:
65 for name in self._dirs:
66 f[util.normcase(name)] = name
66 f[util.normcase(name)] = name
67 f['.'] = '.' # prevents useless util.fspath() invocation
67 f['.'] = '.' # prevents useless util.fspath() invocation
68 return f
68 return f
69
69
70 @repocache('branch')
70 @repocache('branch')
71 def _branch(self):
71 def _branch(self):
72 try:
72 try:
73 return self._opener.read("branch").strip() or "default"
73 return self._opener.read("branch").strip() or "default"
74 except IOError, inst:
74 except IOError, inst:
75 if inst.errno != errno.ENOENT:
75 if inst.errno != errno.ENOENT:
76 raise
76 raise
77 return "default"
77 return "default"
78
78
79 @propertycache
79 @propertycache
80 def _pl(self):
80 def _pl(self):
81 try:
81 try:
82 fp = self._opener("dirstate")
83 st = fp.read(40)
84 fp.close()
85 l = len(st)
86 if l == 40:
87 return st[:20], st[20:40]
88 elif l > 0 and l < 40:
89 raise util.Abort(_('working directory state appears damaged!'))
90 except IOError, err:
91 if err.errno != errno.ENOENT:
92 raise
93 return [nullid, nullid]
94
95 @propertycache
96 def _dirs(self):
97 return scmutil.dirs(self._map, 'r')
98
99 def dirs(self):
100 return self._dirs
101
102 @rootcache('.hgignore')
103 def _ignore(self):
104 files = [self._join('.hgignore')]
105 for name, path in self._ui.configitems("ui"):
106 if name == 'ignore' or name.startswith('ignore.'):
107 files.append(util.expandpath(path))
108 return ignore.ignore(self._root, files, self._ui.warn)
109
110 @propertycache
111 def _slash(self):
112 return self._ui.configbool('ui', 'slash') and os.sep != '/'
113
114 @propertycache
115 def _checklink(self):
116 return util.checklink(self._root)
117
118 @propertycache
119 def _checkexec(self):
120 return util.checkexec(self._root)
121
122 @propertycache
123 def _checkcase(self):
124 return not util.checkcase(self._join('.hg'))
125
126 def _join(self, f):
127 # much faster than os.path.join()
128 # it's safe because f is always a relative path
129 return self._rootdir + f
130
131 def flagfunc(self, buildfallback):
132 if self._checklink and self._checkexec:
133 def f(x):
134 try:
135 st = os.lstat(self._join(x))
136 if util.statislink(st):
137 return 'l'
138 if util.statisexec(st):
139 return 'x'
140 except OSError:
141 pass
142 return ''
143 return f
144
145 fallback = buildfallback()
146 if self._checklink:
147 def f(x):
148 if os.path.islink(self._join(x)):
149 return 'l'
150 if 'x' in fallback(x):
151 return 'x'
152 return ''
153 return f
154 if self._checkexec:
155 def f(x):
156 if 'l' in fallback(x):
157 return 'l'
158 if util.isexec(self._join(x)):
159 return 'x'
160 return ''
161 return f
162 else:
163 return fallback
164
165 def getcwd(self):
166 cwd = os.getcwd()
167 if cwd == self._root:
168 return ''
169 # self._root ends with a path separator if self._root is '/' or 'C:\'
170 rootsep = self._root
171 if not util.endswithsep(rootsep):
172 rootsep += os.sep
173 if cwd.startswith(rootsep):
174 return cwd[len(rootsep):]
175 else:
176 # we're outside the repo. return an absolute path.
177 return cwd
178
179 def pathto(self, f, cwd=None):
180 if cwd is None:
181 cwd = self.getcwd()
182 path = util.pathto(self._root, cwd, f)
183 if self._slash:
184 return util.pconvert(path)
185 return path
186
187 def __getitem__(self, key):
188 '''Return the current state of key (a filename) in the dirstate.
189
190 States are:
191 n normal
192 m needs merging
193 r marked for removal
194 a marked for addition
195 ? not tracked
196 '''
197 return self._map.get(key, ("?",))[0]
198
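[editor's note: the lookup above hinges on the dirstate map's entry layout. A standalone sketch, with an illustrative stand-in map rather than a real dirstate: entries are (state, mode, size, mtime) tuples, and missing keys read back as untracked.]

    dmap = {'foo.txt': ('n', 0666, 12, 0)}

    def state(f):
        # same idiom as __getitem__ above: default to the '?' state
        return dmap.get(f, ('?',))[0]

    print(state('foo.txt'))  # 'n'  (tracked, normal)
    print(state('other'))    # '?'  (not tracked)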
199 def __contains__(self, key):
200 return key in self._map
201
202 def __iter__(self):
203 for x in sorted(self._map):
204 yield x
205
206 def iteritems(self):
207 return self._map.iteritems()
208
209 def parents(self):
210 return [self._validate(p) for p in self._pl]
211
212 def p1(self):
213 return self._validate(self._pl[0])
214
215 def p2(self):
216 return self._validate(self._pl[1])
217
218 def branch(self):
219 return encoding.tolocal(self._branch)
220
221 def setparents(self, p1, p2=nullid):
222 """Set dirstate parents to p1 and p2.
223
224 When moving from two parents to one, 'm' merged entries are
225 adjusted to normal and previous copy records discarded and
226 returned by the call.
227
228 See localrepo.setparents()
229 """
230 self._dirty = self._dirtypl = True
231 oldp2 = self._pl[1]
232 self._pl = p1, p2
233 copies = {}
234 if oldp2 != nullid and p2 == nullid:
235 # Discard 'm' markers when moving away from a merge state
236 for f, s in self._map.iteritems():
237 if s[0] == 'm':
238 if f in self._copymap:
239 copies[f] = self._copymap[f]
240 self.normallookup(f)
241 return copies
242
243 def setbranch(self, branch):
244 self._branch = encoding.fromlocal(branch)
245 f = self._opener('branch', 'w', atomictemp=True)
246 try:
247 f.write(self._branch + '\n')
248 f.close()
249
250 # make sure filecache has the correct stat info for _branch after
251 # replacing the underlying file
252 ce = self._filecache['_branch']
253 if ce:
254 ce.refresh()
255 except: # re-raises
256 f.discard()
257 raise
258
259 def _read(self):
260 self._map = {}
261 self._copymap = {}
262 try:
263 st = self._opener.read("dirstate")
264 except IOError, err:
265 if err.errno != errno.ENOENT:
266 raise
267 return
268 if not st:
269 return
270
271 # Python's garbage collector triggers a GC each time a certain number
272 # of container objects (the number being defined by
273 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
274 # for each file in the dirstate. The C version then immediately marks
275 # them as not to be tracked by the collector. However, this has no
276 # effect on when GCs are triggered, only on what objects the GC looks
277 # into. This means that O(number of files) GCs are unavoidable.
278 # Depending on when in the process's lifetime the dirstate is parsed,
279 # this can get very expensive. As a workaround, disable GC while
280 # parsing the dirstate.
281 gcenabled = gc.isenabled()
282 gc.disable()
283 try:
284 p = parsers.parse_dirstate(self._map, self._copymap, st)
285 finally:
286 if gcenabled:
287 gc.enable()
288 if not self._dirtypl:
289 self._pl = p
290
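[editor's note: the long comment in _read() describes a general CPython pattern, not anything dirstate-specific. A minimal standalone sketch of the same workaround; the names parse and data are illustrative placeholders, not part of Mercurial's API.]

    import gc

    def parse_without_gc(parse, data):
        # Suppress collector passes around a parse that allocates one
        # container per tracked file, then restore the caller's GC state.
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return parse(data)
        finally:
            if gcenabled:
                gc.enable()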
291 def invalidate(self):
292 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
293 "_ignore"):
294 if a in self.__dict__:
295 delattr(self, a)
296 self._lastnormaltime = 0
297 self._dirty = False
298
299 def copy(self, source, dest):
300 """Mark dest as a copy of source. Unmark dest if source is None."""
301 if source == dest:
302 return
303 self._dirty = True
304 if source is not None:
305 self._copymap[dest] = source
306 elif dest in self._copymap:
307 del self._copymap[dest]
308
309 def copied(self, file):
310 return self._copymap.get(file, None)
311
312 def copies(self):
313 return self._copymap
314
315 def _droppath(self, f):
316 if self[f] not in "?r" and "_dirs" in self.__dict__:
317 self._dirs.delpath(f)
318
319 def _addpath(self, f, state, mode, size, mtime):
320 oldstate = self[f]
321 if state == 'a' or oldstate == 'r':
322 scmutil.checkfilename(f)
323 if f in self._dirs:
324 raise util.Abort(_('directory %r already in dirstate') % f)
325 # shadows
326 for d in scmutil.finddirs(f):
327 if d in self._dirs:
328 break
329 if d in self._map and self[d] != 'r':
330 raise util.Abort(
331 _('file %r in dirstate clashes with %r') % (d, f))
332 if oldstate in "?r" and "_dirs" in self.__dict__:
333 self._dirs.addpath(f)
334 self._dirty = True
335 self._map[f] = (state, mode, size, mtime)
336
337 def normal(self, f):
338 '''Mark a file normal and clean.'''
339 s = os.lstat(self._join(f))
340 mtime = int(s.st_mtime)
341 self._addpath(f, 'n', s.st_mode,
342 s.st_size & _rangemask, mtime & _rangemask)
343 if f in self._copymap:
344 del self._copymap[f]
345 if mtime > self._lastnormaltime:
346 # Remember the most recent modification timeslot for status(),
347 # to make sure we won't miss future size-preserving file content
348 # modifications that happen within the same timeslot.
349 self._lastnormaltime = mtime
350
351 def normallookup(self, f):
352 '''Mark a file normal, but possibly dirty.'''
353 if self._pl[1] != nullid and f in self._map:
354 # if there is a merge going on and the file was either
355 # in state 'm' (-1) or coming from other parent (-2) before
356 # being removed, restore that state.
357 entry = self._map[f]
358 if entry[0] == 'r' and entry[2] in (-1, -2):
359 source = self._copymap.get(f)
360 if entry[2] == -1:
361 self.merge(f)
362 elif entry[2] == -2:
363 self.otherparent(f)
364 if source:
365 self.copy(source, f)
366 return
367 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
368 return
369 self._addpath(f, 'n', 0, -1, -1)
370 if f in self._copymap:
371 del self._copymap[f]
372
373 def otherparent(self, f):
374 '''Mark as coming from the other parent, always dirty.'''
375 if self._pl[1] == nullid:
376 raise util.Abort(_("setting %r to other parent "
377 "only allowed in merges") % f)
378 self._addpath(f, 'n', 0, -2, -1)
379 if f in self._copymap:
380 del self._copymap[f]
381
382 def add(self, f):
383 '''Mark a file added.'''
384 self._addpath(f, 'a', 0, -1, -1)
385 if f in self._copymap:
386 del self._copymap[f]
387
388 def remove(self, f):
389 '''Mark a file removed.'''
390 self._dirty = True
391 self._droppath(f)
392 size = 0
393 if self._pl[1] != nullid and f in self._map:
394 # backup the previous state
395 entry = self._map[f]
396 if entry[0] == 'm': # merge
397 size = -1
398 elif entry[0] == 'n' and entry[2] == -2: # other parent
399 size = -2
400 self._map[f] = ('r', 0, size, 0)
401 if size == 0 and f in self._copymap:
402 del self._copymap[f]
403
404 def merge(self, f):
405 '''Mark a file merged.'''
406 if self._pl[1] == nullid:
407 return self.normallookup(f)
408 s = os.lstat(self._join(f))
409 self._addpath(f, 'm', s.st_mode,
410 s.st_size & _rangemask, int(s.st_mtime) & _rangemask)
411 if f in self._copymap:
412 del self._copymap[f]
413
414 def drop(self, f):
415 '''Drop a file from the dirstate'''
416 if f in self._map:
417 self._dirty = True
418 self._droppath(f)
419 del self._map[f]
420
421 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
422 normed = util.normcase(path)
423 folded = self._foldmap.get(normed, None)
424 if folded is None:
425 if isknown:
426 folded = path
427 else:
428 if exists is None:
429 exists = os.path.lexists(os.path.join(self._root, path))
430 if not exists:
431 # Maybe a path component exists
432 if not ignoremissing and '/' in path:
433 d, f = path.rsplit('/', 1)
434 d = self._normalize(d, isknown, ignoremissing, None)
435 folded = d + "/" + f
436 else:
437 # No path components, preserve original case
438 folded = path
439 else:
440 # recursively normalize leading directory components
441 # against dirstate
442 if '/' in normed:
443 d, f = normed.rsplit('/', 1)
444 d = self._normalize(d, isknown, ignoremissing, True)
445 r = self._root + "/" + d
446 folded = d + "/" + util.fspath(f, r)
447 else:
448 folded = util.fspath(normed, self._root)
449 self._foldmap[normed] = folded
450
451 return folded
452
453 def normalize(self, path, isknown=False, ignoremissing=False):
454 '''
455 normalize the case of a pathname when on a casefolding filesystem
456
457 isknown specifies whether the filename came from walking the
458 disk, to avoid extra filesystem access.
459
460 If ignoremissing is True, missing paths are returned
461 unchanged. Otherwise, we try harder to normalize possibly
462 existing path components.
463
464 The normalized case is determined based on the following precedence:
465
466 - version of name already stored in the dirstate
467 - version of name stored on disk
468 - version provided via command arguments
469 '''
470
471 if self._checkcase:
472 return self._normalize(path, isknown, ignoremissing)
473 return path
474
475 def clear(self):
476 self._map = {}
477 if "_dirs" in self.__dict__:
478 delattr(self, "_dirs")
479 self._copymap = {}
480 self._pl = [nullid, nullid]
481 self._lastnormaltime = 0
482 self._dirty = True
483
484 def rebuild(self, parent, allfiles, changedfiles=None):
485 changedfiles = changedfiles or allfiles
486 oldmap = self._map
487 self.clear()
488 for f in allfiles:
489 if f not in changedfiles:
490 self._map[f] = oldmap[f]
491 else:
492 if 'x' in allfiles.flags(f):
493 self._map[f] = ('n', 0777, -1, 0)
494 else:
495 self._map[f] = ('n', 0666, -1, 0)
496 self._pl = (parent, nullid)
497 self._dirty = True
498
499 def write(self):
500 if not self._dirty:
501 return
502 st = self._opener("dirstate", "w", atomictemp=True)
503
504 def finish(s):
505 st.write(s)
506 st.close()
507 self._lastnormaltime = 0
508 self._dirty = self._dirtypl = False
509
510 # use the modification time of the newly created temporary file as the
511 # filesystem's notion of 'now'
512 now = util.fstat(st).st_mtime
513 finish(parsers.pack_dirstate(self._map, self._copymap, self._pl, now))
514
515 def _dirignore(self, f):
516 if f == '.':
517 return False
518 if self._ignore(f):
519 return True
520 for p in scmutil.finddirs(f):
521 if self._ignore(p):
522 return True
523 return False
524
525 def _walkexplicit(self, match, subrepos):
526 '''Get stat data about the files explicitly specified by match.
527
528 Return a triple (results, dirsfound, dirsnotfound).
529 - results is a mapping from filename to stat result. It also contains
530 listings mapping subrepos and .hg to None.
531 - dirsfound is a list of files found to be directories.
532 - dirsnotfound is a list of files that the dirstate thinks are
533 directories and that were not found.'''
534
535 def badtype(mode):
536 kind = _('unknown')
537 if stat.S_ISCHR(mode):
538 kind = _('character device')
539 elif stat.S_ISBLK(mode):
540 kind = _('block device')
541 elif stat.S_ISFIFO(mode):
542 kind = _('fifo')
543 elif stat.S_ISSOCK(mode):
544 kind = _('socket')
545 elif stat.S_ISDIR(mode):
546 kind = _('directory')
547 return _('unsupported file type (type is %s)') % kind
548
549 matchedir = match.explicitdir
550 badfn = match.bad
551 dmap = self._map
552 normpath = util.normpath
553 lstat = os.lstat
554 getkind = stat.S_IFMT
555 dirkind = stat.S_IFDIR
556 regkind = stat.S_IFREG
557 lnkkind = stat.S_IFLNK
558 join = self._join
559 dirsfound = []
560 foundadd = dirsfound.append
561 dirsnotfound = []
562 notfoundadd = dirsnotfound.append
563
564 if match.matchfn != match.exact and self._checkcase:
565 normalize = self._normalize
566 else:
567 normalize = None
568
569 files = sorted(match.files())
570 subrepos.sort()
571 i, j = 0, 0
572 while i < len(files) and j < len(subrepos):
573 subpath = subrepos[j] + "/"
574 if files[i] < subpath:
575 i += 1
576 continue
577 while i < len(files) and files[i].startswith(subpath):
578 del files[i]
579 j += 1
580
581 if not files or '.' in files:
582 files = ['']
583 results = dict.fromkeys(subrepos)
584 results['.hg'] = None
585
586 for ff in files:
587 if normalize:
588 nf = normalize(normpath(ff), False, True)
589 else:
590 nf = normpath(ff)
591 if nf in results:
592 continue
593
594 try:
595 st = lstat(join(nf))
596 kind = getkind(st.st_mode)
597 if kind == dirkind:
598 if nf in dmap:
599 #file deleted on disk but still in dirstate
600 results[nf] = None
601 if matchedir:
602 matchedir(nf)
603 foundadd(nf)
604 elif kind == regkind or kind == lnkkind:
605 results[nf] = st
606 else:
607 badfn(ff, badtype(kind))
608 if nf in dmap:
609 results[nf] = None
610 except OSError, inst:
611 if nf in dmap: # does it exactly match a file?
612 results[nf] = None
613 else: # does it match a directory?
614 prefix = nf + "/"
615 for fn in dmap:
616 if fn.startswith(prefix):
617 if matchedir:
618 matchedir(nf)
619 notfoundadd(nf)
620 break
621 else:
622 badfn(ff, inst.strerror)
623
624 return results, dirsfound, dirsnotfound
625
626 def walk(self, match, subrepos, unknown, ignored, full=True):
627 '''
628 Walk recursively through the directory tree, finding all files
629 matched by match.
630
631 If full is False, maybe skip some known-clean files.
632
633 Return a dict mapping filename to stat-like object (either
634 mercurial.osutil.stat instance or return value of os.stat()).
635
636 '''
637 # full is a flag that extensions that hook into walk can use -- this
638 # implementation doesn't use it at all. This satisfies the contract
639 # because we only guarantee a "maybe".
640
641 def fwarn(f, msg):
642 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
643 return False
644
645 ignore = self._ignore
646 dirignore = self._dirignore
647 if ignored:
648 ignore = util.never
649 dirignore = util.never
650 elif not unknown:
651 # if unknown and ignored are False, skip step 2
652 ignore = util.always
653 dirignore = util.always
654
655 matchfn = match.matchfn
656 matchalways = match.always()
657 matchtdir = match.traversedir
658 dmap = self._map
659 listdir = osutil.listdir
660 lstat = os.lstat
661 dirkind = stat.S_IFDIR
662 regkind = stat.S_IFREG
663 lnkkind = stat.S_IFLNK
664 join = self._join
665
666 exact = skipstep3 = False
667 if matchfn == match.exact: # match.exact
668 exact = True
669 dirignore = util.always # skip step 2
670 elif match.files() and not match.anypats(): # match.match, no patterns
671 skipstep3 = True
672
673 if not exact and self._checkcase:
674 normalize = self._normalize
675 skipstep3 = False
676 else:
677 normalize = None
678
679 # step 1: find all explicit files
680 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
681
682 skipstep3 = skipstep3 and not (work or dirsnotfound)
683 work = [d for d in work if not dirignore(d)]
684 wadd = work.append
685
686 # step 2: visit subdirectories
687 while work:
688 nd = work.pop()
689 skip = None
690 if nd == '.':
691 nd = ''
692 else:
693 skip = '.hg'
694 try:
695 entries = listdir(join(nd), stat=True, skip=skip)
696 except OSError, inst:
697 if inst.errno in (errno.EACCES, errno.ENOENT):
698 fwarn(nd, inst.strerror)
699 continue
700 raise
701 for f, kind, st in entries:
702 if normalize:
703 nf = normalize(nd and (nd + "/" + f) or f, True, True)
704 else:
705 nf = nd and (nd + "/" + f) or f
706 if nf not in results:
707 if kind == dirkind:
708 if not ignore(nf):
709 if matchtdir:
710 matchtdir(nf)
711 wadd(nf)
712 if nf in dmap and (matchalways or matchfn(nf)):
713 results[nf] = None
714 elif kind == regkind or kind == lnkkind:
715 if nf in dmap:
716 if matchalways or matchfn(nf):
717 results[nf] = st
718 elif (matchalways or matchfn(nf)) and not ignore(nf):
719 results[nf] = st
720 elif nf in dmap and (matchalways or matchfn(nf)):
721 results[nf] = None
722
723 for s in subrepos:
724 del results[s]
725 del results['.hg']
726
727 # step 3: report unseen items in the dmap hash
728 if not skipstep3 and not exact:
729 if not results and matchalways:
730 visit = dmap.keys()
731 else:
732 visit = [f for f in dmap if f not in results and matchfn(f)]
733 visit.sort()
734
735 if unknown:
736 # unknown == True means we walked the full directory tree above.
737 # So if a file is not seen it was either a) not matching matchfn
738 # b) ignored, c) missing, or d) under a symlink directory.
739 audit_path = scmutil.pathauditor(self._root)
739 audit_path = pathutil.pathauditor(self._root)
740
741 for nf in iter(visit):
742 # Report ignored items in the dmap as long as they are not
743 # under a symlink directory.
744 if audit_path.check(nf):
745 try:
746 results[nf] = lstat(join(nf))
747 except OSError:
748 # file doesn't exist
749 results[nf] = None
750 else:
751 # It's either missing or under a symlink directory
752 results[nf] = None
753 else:
754 # We may not have walked the full directory tree above,
755 # so stat everything we missed.
756 nf = iter(visit).next
757 for st in util.statfiles([join(i) for i in visit]):
758 results[nf()] = st
759 return results
760
761 def status(self, match, subrepos, ignored, clean, unknown):
762 '''Determine the status of the working copy relative to the
763 dirstate and return a tuple of lists (unsure, modified, added,
764 removed, deleted, unknown, ignored, clean), where:
765
766 unsure:
767 files that might have been modified since the dirstate was
768 written, but need to be read to be sure (size is the same
769 but mtime differs)
770 modified:
771 files that have definitely been modified since the dirstate
772 was written (different size or mode)
773 added:
774 files that have been explicitly added with hg add
775 removed:
776 files that have been explicitly removed with hg remove
777 deleted:
778 files that have been deleted through other means ("missing")
779 unknown:
780 files not in the dirstate that are not ignored
781 ignored:
782 files not in the dirstate that are ignored
783 (by _dirignore())
784 clean:
785 files that have definitely not been modified since the
786 dirstate was written
787 '''
788 listignored, listclean, listunknown = ignored, clean, unknown
789 lookup, modified, added, unknown, ignored = [], [], [], [], []
790 removed, deleted, clean = [], [], []
791
792 dmap = self._map
793 ladd = lookup.append # aka "unsure"
794 madd = modified.append
795 aadd = added.append
796 uadd = unknown.append
797 iadd = ignored.append
798 radd = removed.append
799 dadd = deleted.append
800 cadd = clean.append
801 mexact = match.exact
802 dirignore = self._dirignore
803 checkexec = self._checkexec
804 copymap = self._copymap
805 lastnormaltime = self._lastnormaltime
806
807 # We need to do full walks when either
808 # - we're listing all clean files, or
809 # - match.traversedir does something, because match.traversedir should
810 # be called for every dir in the working dir
811 full = listclean or match.traversedir is not None
812 for fn, st in self.walk(match, subrepos, listunknown, listignored,
813 full=full).iteritems():
814 if fn not in dmap:
815 if (listignored or mexact(fn)) and dirignore(fn):
816 if listignored:
817 iadd(fn)
818 else:
819 uadd(fn)
820 continue
821
822 state, mode, size, time = dmap[fn]
823
824 if not st and state in "nma":
825 dadd(fn)
826 elif state == 'n':
827 mtime = int(st.st_mtime)
828 if (size >= 0 and
829 ((size != st.st_size and size != st.st_size & _rangemask)
830 or ((mode ^ st.st_mode) & 0100 and checkexec))
831 or size == -2 # other parent
832 or fn in copymap):
833 madd(fn)
834 elif time != mtime and time != mtime & _rangemask:
835 ladd(fn)
836 elif mtime == lastnormaltime:
837 # fn may have been changed in the same timeslot without
838 # changing its size. This can happen if we quickly do
839 # multiple commits in a single transaction.
840 # Force lookup, so we don't miss such a racy file change.
841 ladd(fn)
842 elif listclean:
843 cadd(fn)
844 elif state == 'm':
845 madd(fn)
846 elif state == 'a':
847 aadd(fn)
848 elif state == 'r':
849 radd(fn)
850
851 return (lookup, modified, added, removed, deleted, unknown, ignored,
852 clean)
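[editor's note: the only behavioral change in this hunk is the import source of pathauditor. The calling pattern in walk()'s step 3 is unchanged; a hedged sketch of that pattern, where the repository root is a placeholder path:]

    from mercurial import pathutil

    audit_path = pathutil.pathauditor('/path/to/repo')  # placeholder root
    if audit_path.check('some/candidate/path'):
        # the path stays inside the repo and crosses no symlink
        # or nested repository, so it is safe to stat
        pass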
@@ -1,408 +1,408 b''
1 # hgweb/webutil.py - utility library for the web interface.
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 #
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
8
9 import os, copy
10 from mercurial import match, patch, scmutil, error, ui, util
10 from mercurial import match, patch, error, ui, util, pathutil
11 from mercurial.i18n import _
12 from mercurial.node import hex, nullid
13 from common import ErrorResponse
14 from common import HTTP_NOT_FOUND
15 import difflib
16
17 def up(p):
18 if p[0] != "/":
19 p = "/" + p
20 if p[-1] == "/":
21 p = p[:-1]
22 up = os.path.dirname(p)
23 if up == "/":
24 return "/"
25 return up + "/"
26
27 def _navseq(step, firststep=None):
28 if firststep:
29 yield firststep
30 if firststep >= 20 and firststep <= 40:
31 firststep = 50
32 yield firststep
33 assert step > 0
34 assert firststep > 0
35 while step <= firststep:
36 step *= 10
37 while True:
38 yield 1 * step
39 yield 3 * step
40 step *= 10
41
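[editor's note: to make the navigation step sequence concrete, a small driver, assuming _navseq as defined above. For a page length of 25 the offsets start at the page length, bump into the 50 slot, then grow by alternating factors of 1 and 3 per decade.]

    import itertools

    print(list(itertools.islice(_navseq(1, 25), 6)))
    # -> [25, 50, 100, 300, 1000, 3000]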
42 class revnav(object):
43
44 def __init__(self, repo):
45 """Navigation generation object
46
47 :repo: repo object we generate nav for
48 """
49 # used for hex generation
50 self._revlog = repo.changelog
51
52 def __nonzero__(self):
53 """return True if any revision to navigate over"""
54 return self._first() is not None
55
56 def _first(self):
57 """return the minimum non-filtered changeset or None"""
58 try:
59 return iter(self._revlog).next()
60 except StopIteration:
61 return None
62
63 def hex(self, rev):
64 return hex(self._revlog.node(rev))
65
66 def gen(self, pos, pagelen, limit):
67 """computes label and revision id for navigation link
68
69 :pos: is the revision relative to which we generate navigation.
70 :pagelen: the size of each navigation page
71 :limit: how far shall we link
72
73 The return is:
74 - a single element tuple
75 - containing a dictionary with a `before` and `after` key
76 - values are generator functions taking arbitrary number of kwargs
77 - yield items are dictionaries with `label` and `node` keys
78 """
79 if not self:
80 # empty repo
81 return ({'before': (), 'after': ()},)
82
83 targets = []
84 for f in _navseq(1, pagelen):
85 if f > limit:
86 break
87 targets.append(pos + f)
88 targets.append(pos - f)
89 targets.sort()
90
91 first = self._first()
92 navbefore = [("(%i)" % first, self.hex(first))]
93 navafter = []
94 for rev in targets:
95 if rev not in self._revlog:
96 continue
97 if pos < rev < limit:
98 navafter.append(("+%d" % abs(rev - pos), self.hex(rev)))
99 if 0 < rev < pos:
100 navbefore.append(("-%d" % abs(rev - pos), self.hex(rev)))
101
102
103 navafter.append(("tip", "tip"))
104
105 data = lambda i: {"label": i[0], "node": i[1]}
106 return ({'before': lambda **map: (data(i) for i in navbefore),
107 'after': lambda **map: (data(i) for i in navafter)},)
108
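[editor's note: the return contract in gen()'s docstring can be exercised without a repository. This stand-in mimics the documented shape, a one-element tuple holding 'before'/'after' generator functions, and is illustrative only.]

    nav = ({'before': lambda **map: iter([{'label': '(0)', 'node': 'abc123'}]),
            'after': lambda **map: iter([{'label': 'tip', 'node': 'tip'}])},)
    for entry in nav[0]['before']():
        print('%s %s' % (entry['label'], entry['node']))  # (0) abc123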
109 class filerevnav(revnav):
110
111 def __init__(self, repo, path):
112 """Navigation generation object
113
114 :repo: repo object we generate nav for
115 :path: path of the file we generate nav for
116 """
117 # used for iteration
118 self._changelog = repo.unfiltered().changelog
119 # used for hex generation
120 self._revlog = repo.file(path)
121
122 def hex(self, rev):
123 return hex(self._changelog.node(self._revlog.linkrev(rev)))
124
125
126 def _siblings(siblings=[], hiderev=None):
127 siblings = [s for s in siblings if s.node() != nullid]
128 if len(siblings) == 1 and siblings[0].rev() == hiderev:
129 return
130 for s in siblings:
131 d = {'node': s.hex(), 'rev': s.rev()}
132 d['user'] = s.user()
133 d['date'] = s.date()
134 d['description'] = s.description()
135 d['branch'] = s.branch()
136 if util.safehasattr(s, 'path'):
137 d['file'] = s.path()
138 yield d
139
140 def parents(ctx, hide=None):
141 return _siblings(ctx.parents(), hide)
142
143 def children(ctx, hide=None):
144 return _siblings(ctx.children(), hide)
145
146 def renamelink(fctx):
147 r = fctx.renamed()
148 if r:
149 return [dict(file=r[0], node=hex(r[1]))]
150 return []
151
152 def nodetagsdict(repo, node):
153 return [{"name": i} for i in repo.nodetags(node)]
154
155 def nodebookmarksdict(repo, node):
156 return [{"name": i} for i in repo.nodebookmarks(node)]
157
158 def nodebranchdict(repo, ctx):
159 branches = []
160 branch = ctx.branch()
161 # If this is an empty repo, ctx.node() == nullid,
162 # ctx.branch() == 'default'.
163 try:
164 branchnode = repo.branchtip(branch)
165 except error.RepoLookupError:
166 branchnode = None
167 if branchnode == ctx.node():
168 branches.append({"name": branch})
169 return branches
170
171 def nodeinbranch(repo, ctx):
172 branches = []
173 branch = ctx.branch()
174 try:
175 branchnode = repo.branchtip(branch)
176 except error.RepoLookupError:
177 branchnode = None
178 if branch != 'default' and branchnode != ctx.node():
179 branches.append({"name": branch})
180 return branches
181
182 def nodebranchnodefault(ctx):
183 branches = []
184 branch = ctx.branch()
185 if branch != 'default':
186 branches.append({"name": branch})
187 return branches
188
189 def showtag(repo, tmpl, t1, node=nullid, **args):
190 for t in repo.nodetags(node):
191 yield tmpl(t1, tag=t, **args)
192
193 def showbookmark(repo, tmpl, t1, node=nullid, **args):
194 for t in repo.nodebookmarks(node):
195 yield tmpl(t1, bookmark=t, **args)
196
197 def cleanpath(repo, path):
198 path = path.lstrip('/')
199 return scmutil.canonpath(repo.root, '', path)
199 return pathutil.canonpath(repo.root, '', path)
200
200
201 def changeidctx (repo, changeid):
201 def changeidctx (repo, changeid):
202 try:
202 try:
203 ctx = repo[changeid]
203 ctx = repo[changeid]
204 except error.RepoError:
204 except error.RepoError:
205 man = repo.manifest
205 man = repo.manifest
206 ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
206 ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
207
207
208 return ctx
208 return ctx
209
209
210 def changectx (repo, req):
210 def changectx (repo, req):
211 changeid = "tip"
211 changeid = "tip"
212 if 'node' in req.form:
212 if 'node' in req.form:
213 changeid = req.form['node'][0]
213 changeid = req.form['node'][0]
214 ipos=changeid.find(':')
214 ipos=changeid.find(':')
215 if ipos != -1:
215 if ipos != -1:
216 changeid = changeid[(ipos + 1):]
216 changeid = changeid[(ipos + 1):]
217 elif 'manifest' in req.form:
217 elif 'manifest' in req.form:
218 changeid = req.form['manifest'][0]
218 changeid = req.form['manifest'][0]
219
219
220 return changeidctx(repo, changeid)
220 return changeidctx(repo, changeid)
221
221
222 def basechangectx(repo, req):
222 def basechangectx(repo, req):
223 if 'node' in req.form:
223 if 'node' in req.form:
224 changeid = req.form['node'][0]
224 changeid = req.form['node'][0]
225 ipos=changeid.find(':')
225 ipos=changeid.find(':')
226 if ipos != -1:
226 if ipos != -1:
227 changeid = changeid[:ipos]
227 changeid = changeid[:ipos]
228 return changeidctx(repo, changeid)
228 return changeidctx(repo, changeid)
229
229
230 return None
230 return None
231
231
232 def filectx(repo, req):
232 def filectx(repo, req):
233 if 'file' not in req.form:
233 if 'file' not in req.form:
234 raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
234 raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
235 path = cleanpath(repo, req.form['file'][0])
235 path = cleanpath(repo, req.form['file'][0])
236 if 'node' in req.form:
236 if 'node' in req.form:
237 changeid = req.form['node'][0]
237 changeid = req.form['node'][0]
238 elif 'filenode' in req.form:
238 elif 'filenode' in req.form:
239 changeid = req.form['filenode'][0]
239 changeid = req.form['filenode'][0]
240     else:
241         raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
242     try:
243         fctx = repo[changeid][path]
244     except error.RepoError:
245         fctx = repo.filectx(path, fileid=changeid)
246
247     return fctx
248
249 def listfilediffs(tmpl, files, node, max):
250     for f in files[:max]:
251         yield tmpl('filedifflink', node=hex(node), file=f)
252     if len(files) > max:
253         yield tmpl('fileellipses')
254
255 def diffs(repo, tmpl, ctx, basectx, files, parity, style):
256
257     def countgen():
258         start = 1
259         while True:
260             yield start
261             start += 1
262
263     blockcount = countgen()
264     def prettyprintlines(diff, blockno):
265         for lineno, l in enumerate(diff.splitlines(True)):
266             lineno = "%d.%d" % (blockno, lineno + 1)
267             if l.startswith('+'):
268                 ltype = "difflineplus"
269             elif l.startswith('-'):
270                 ltype = "difflineminus"
271             elif l.startswith('@'):
272                 ltype = "difflineat"
273             else:
274                 ltype = "diffline"
275             yield tmpl(ltype,
276                        line=l,
277                        lineid="l%s" % lineno,
278                        linenumber="% 8s" % lineno)
279
280     if files:
281         m = match.exact(repo.root, repo.getcwd(), files)
282     else:
283         m = match.always(repo.root, repo.getcwd())
284
285     diffopts = patch.diffopts(repo.ui, untrusted=True)
286     if basectx is None:
287         parents = ctx.parents()
288         node1 = parents and parents[0].node() or nullid
289     else:
290         node1 = basectx.node()
291     node2 = ctx.node()
292
293     block = []
294     for chunk in patch.diff(repo, node1, node2, m, opts=diffopts):
295         if chunk.startswith('diff') and block:
296             blockno = blockcount.next()
297             yield tmpl('diffblock', parity=parity.next(), blockno=blockno,
298                        lines=prettyprintlines(''.join(block), blockno))
299             block = []
300         if chunk.startswith('diff') and style != 'raw':
301             chunk = ''.join(chunk.splitlines(True)[1:])
302         block.append(chunk)
303     blockno = blockcount.next()
304     yield tmpl('diffblock', parity=parity.next(), blockno=blockno,
305                lines=prettyprintlines(''.join(block), blockno))
306
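The line classification in prettyprintlines() is a plain first-character dispatch. A minimal standalone sketch of the same dispatch (the names and sample lines below are invented here, not part of webutil.py):

def classify(line):
    # same prefix test order as prettyprintlines() above
    if line.startswith('+'):
        return 'difflineplus'
    elif line.startswith('-'):
        return 'difflineminus'
    elif line.startswith('@'):
        return 'difflineat'
    return 'diffline'

for l in ['@@ -1,2 +1,2 @@', '-old line', '+new line', ' context']:
    print('%-18s -> %s' % (l, classify(l)))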
307 def compare(tmpl, context, leftlines, rightlines):
308     '''Generator function that provides side-by-side comparison data.'''
309
310     def compline(type, leftlineno, leftline, rightlineno, rightline):
311         lineid = leftlineno and ("l%s" % leftlineno) or ''
312         lineid += rightlineno and ("r%s" % rightlineno) or ''
313         return tmpl('comparisonline',
314                     type=type,
315                     lineid=lineid,
316                     leftlinenumber="% 6s" % (leftlineno or ''),
317                     leftline=leftline or '',
318                     rightlinenumber="% 6s" % (rightlineno or ''),
319                     rightline=rightline or '')
320
321     def getblock(opcodes):
322         for type, llo, lhi, rlo, rhi in opcodes:
323             len1 = lhi - llo
324             len2 = rhi - rlo
325             count = min(len1, len2)
326             for i in xrange(count):
327                 yield compline(type=type,
328                                leftlineno=llo + i + 1,
329                                leftline=leftlines[llo + i],
330                                rightlineno=rlo + i + 1,
331                                rightline=rightlines[rlo + i])
332             if len1 > len2:
333                 for i in xrange(llo + count, lhi):
334                     yield compline(type=type,
335                                    leftlineno=i + 1,
336                                    leftline=leftlines[i],
337                                    rightlineno=None,
338                                    rightline=None)
339             elif len2 > len1:
340                 for i in xrange(rlo + count, rhi):
341                     yield compline(type=type,
342                                    leftlineno=None,
343                                    leftline=None,
344                                    rightlineno=i + 1,
345                                    rightline=rightlines[i])
346
347     s = difflib.SequenceMatcher(None, leftlines, rightlines)
348     if context < 0:
349         yield tmpl('comparisonblock', lines=getblock(s.get_opcodes()))
350     else:
351         for oc in s.get_grouped_opcodes(n=context):
352             yield tmpl('comparisonblock', lines=getblock(oc))
353
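compare() leans entirely on difflib: the opcodes it feeds to getblock() describe how a slice of the left side maps onto a slice of the right side. A runnable illustration of those opcodes, independent of the templater (sample inputs invented here):

import difflib

left = ['a\n', 'b\n', 'c\n']
right = ['a\n', 'x\n', 'c\n', 'd\n']
s = difflib.SequenceMatcher(None, left, right)
for tag, llo, lhi, rlo, rhi in s.get_opcodes():
    # each opcode maps left[llo:lhi] onto right[rlo:rhi]; getblock()
    # pairs the two ranges line by line and pads the shorter side
    print('%-7s left[%d:%d] right[%d:%d]' % (tag, llo, lhi, rlo, rhi))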
354 def diffstatgen(ctx, basectx):
355     '''Generator function that provides the diffstat data.'''
356
357     stats = patch.diffstatdata(util.iterlines(ctx.diff(basectx)))
358     maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
359     while True:
360         yield stats, maxname, maxtotal, addtotal, removetotal, binary
361
362 def diffsummary(statgen):
363     '''Return a short summary of the diff.'''
364
365     stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next()
366     return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
367              len(stats), addtotal, removetotal)
368
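diffstatgen() computes the stats once and then yields the same tuple forever, so diffsummary() and diffstat() can both pull from one shared generator without rerunning the diff. The pattern in isolation (a sketch with a made-up payload, not Mercurial code):

def sharedresult(value):
    # the expensive work would happen here, exactly once
    while True:
        yield value

g = sharedresult(('stats', 3, 42))
print(next(g) is next(g))   # True: every consumer sees the same object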
369 def diffstat(tmpl, ctx, statgen, parity):
370     '''Return a diffstat template for each file in the diff.'''
371
372     stats, maxname, maxtotal, addtotal, removetotal, binary = statgen.next()
373     files = ctx.files()
374
375     def pct(i):
376         if maxtotal == 0:
377             return 0
378         return (float(i) / maxtotal) * 100
379
380     fileno = 0
381     for filename, adds, removes, isbinary in stats:
382         template = filename in files and 'diffstatlink' or 'diffstatnolink'
383         total = adds + removes
384         fileno += 1
385         yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
386                    total=total, addpct=pct(adds), removepct=pct(removes),
387                    parity=parity.next())
388
389 class sessionvars(object):
390     def __init__(self, vars, start='?'):
391         self.start = start
392         self.vars = vars
393     def __getitem__(self, key):
394         return self.vars[key]
395     def __setitem__(self, key, value):
396         self.vars[key] = value
397     def __copy__(self):
398         return sessionvars(copy.copy(self.vars), self.start)
399     def __iter__(self):
400         separator = self.start
401         for key, value in sorted(self.vars.iteritems()):
402             yield {'name': key, 'value': str(value), 'separator': separator}
403             separator = '&'
404
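Iterating a sessionvars instance yields one dict per query parameter, with the separator flipping from '?' to '&' after the first item so templates can concatenate the pieces straight into a URL. The resulting string, sketched without the class (values invented here):

params = {'style': 'gitweb', 'rev': 'tip'}
sep, parts = '?', []
for key in sorted(params):
    # first parameter gets '?', the rest get '&', as in __iter__ above
    parts.append('%s%s=%s' % (sep, key, params[key]))
    sep = '&'
print(''.join(parts))   # ?rev=tip&style=gitweb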
405 class wsgiui(ui.ui):
406     # default termwidth breaks under mod_wsgi
407     def termwidth(self):
408         return 80
@@ -1,2461 +1,2461 b''
1 # localrepo.py - read/write repository class for mercurial
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
8 from i18n import _
9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import lock, transaction, store, encoding
12 import scmutil, util, extensions, hook, error, revset
13 import match as matchmod
14 import merge as mergemod
15 import tags as tagsmod
16 from lock import release
17 import weakref, errno, os, time, inspect
18 import branchmap
18 import branchmap, pathutil
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
21
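The substance of this revision is already visible in the import list: localrepo now pulls its path auditor from the new pathutil module instead of scmutil (see the change at line 169 below). A hypothetical caller-side sketch, assuming a Mercurial that ships pathutil is importable and that the auditor keeps the (root, callback) construction used on line 169; the paths and callback here are made up:

from mercurial import pathutil

def approve_nested(prefix):
    # stand-in for localrepo._checknested; approve no nested repos
    return False

auditor = pathutil.pathauditor('/tmp/repo', approve_nested)
print(auditor.check('ok/file.txt'))   # True for an innocuous path
print(auditor.check('.hg/hgrc'))      # False: under the top-level .hg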
22 class repofilecache(filecache):
23     """All filecache usage on a repo is done for logic that should be unfiltered
24     """
25
26     def __get__(self, repo, type=None):
27         return super(repofilecache, self).__get__(repo.unfiltered(), type)
28     def __set__(self, repo, value):
29         return super(repofilecache, self).__set__(repo.unfiltered(), value)
30     def __delete__(self, repo):
31         return super(repofilecache, self).__delete__(repo.unfiltered())
32
33 class storecache(repofilecache):
34     """filecache for files in the store"""
35     def join(self, obj, fname):
36         return obj.sjoin(fname)
37
38 class unfilteredpropertycache(propertycache):
39     """propertycache that applies to the unfiltered repo only"""
40
41     def __get__(self, repo, type=None):
42         unfi = repo.unfiltered()
43         if unfi is repo:
44             return super(unfilteredpropertycache, self).__get__(unfi)
45         return getattr(unfi, self.name)
46
47 class filteredpropertycache(propertycache):
48     """propertycache that must take filtering into account"""
49
50     def cachevalue(self, obj, value):
51         object.__setattr__(obj, self.name, value)
52
53
54 def hasunfilteredcache(repo, name):
55     """check if a repo has an unfilteredpropertycache value for <name>"""
56     return name in vars(repo.unfiltered())
57
58 def unfilteredmethod(orig):
59     """decorate a method that always needs to be run on the unfiltered version"""
60     def wrapper(repo, *args, **kwargs):
61         return orig(repo.unfiltered(), *args, **kwargs)
62     return wrapper
63
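All of these helpers funnel state onto the unfiltered repository so that every filtered view shares one set of caches. The redirection in miniature, using the unfilteredmethod decorator from above on fake classes invented for this sketch:

def unfilteredmethod(orig):
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

class fakerepo(object):
    def __init__(self, base=None):
        self._base = base
    def unfiltered(self):
        return self._base or self
    @unfilteredmethod
    def ident(self):
        return id(self)

base = fakerepo()
view = fakerepo(base)                  # stands in for a filtered repoview
print(view.ident() == base.ident())    # True: both ran on the base repo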
64 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
65 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
66
67 class localpeer(peer.peerrepository):
68     '''peer for a local repo; reflects only the most recent API'''
69
70     def __init__(self, repo, caps=MODERNCAPS):
71         peer.peerrepository.__init__(self)
72         self._repo = repo.filtered('served')
73         self.ui = repo.ui
74         self._caps = repo._restrictcapabilities(caps)
75         self.requirements = repo.requirements
76         self.supportedformats = repo.supportedformats
77
78     def close(self):
79         self._repo.close()
80
81     def _capabilities(self):
82         return self._caps
83
84     def local(self):
85         return self._repo
86
87     def canpush(self):
88         return True
89
90     def url(self):
91         return self._repo.url()
92
93     def lookup(self, key):
94         return self._repo.lookup(key)
95
96     def branchmap(self):
97         return self._repo.branchmap()
98
99     def heads(self):
100         return self._repo.heads()
101
102     def known(self, nodes):
103         return self._repo.known(nodes)
104
105     def getbundle(self, source, heads=None, common=None, bundlecaps=None):
106         return self._repo.getbundle(source, heads=heads, common=common,
107                                     bundlecaps=None)
108
109     # TODO We might want to move the next two calls into legacypeer and add
110     # unbundle instead.
111
112     def lock(self):
113         return self._repo.lock()
114
115     def addchangegroup(self, cg, source, url):
116         return self._repo.addchangegroup(cg, source, url)
117
118     def pushkey(self, namespace, key, old, new):
119         return self._repo.pushkey(namespace, key, old, new)
120
121     def listkeys(self, namespace):
122         return self._repo.listkeys(namespace)
123
124     def debugwireargs(self, one, two, three=None, four=None, five=None):
125         '''used to test argument passing over the wire'''
126         return "%s %s %s %s %s" % (one, two, three, four, five)
127
128 class locallegacypeer(localpeer):
129     '''peer extension which implements legacy methods too; used for tests with
130     restricted capabilities'''
131
132     def __init__(self, repo):
133         localpeer.__init__(self, repo, caps=LEGACYCAPS)
134
135     def branches(self, nodes):
136         return self._repo.branches(nodes)
137
138     def between(self, pairs):
139         return self._repo.between(pairs)
140
141     def changegroup(self, basenodes, source):
142         return self._repo.changegroup(basenodes, source)
143
144     def changegroupsubset(self, bases, heads, source):
145         return self._repo.changegroupsubset(bases, heads, source)
146
147 class localrepository(object):
148
149     supportedformats = set(('revlogv1', 'generaldelta'))
150     _basesupported = supportedformats | set(('store', 'fncache', 'shared',
151                                              'dotencode'))
152     openerreqs = set(('revlogv1', 'generaldelta'))
153     requirements = ['revlogv1']
154     filtername = None
155
156     # a list of (ui, featureset) functions.
157     # only functions defined in module of enabled extensions are invoked
158     featuresetupfuncs = set()
159
160     def _baserequirements(self, create):
161         return self.requirements[:]
162
163     def __init__(self, baseui, path=None, create=False):
164         self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
165         self.wopener = self.wvfs
166         self.root = self.wvfs.base
167         self.path = self.wvfs.join(".hg")
168         self.origroot = path
169         self.auditor = scmutil.pathauditor(self.root, self._checknested)
169         self.auditor = pathutil.pathauditor(self.root, self._checknested)
170         self.vfs = scmutil.vfs(self.path)
171         self.opener = self.vfs
172         self.baseui = baseui
173         self.ui = baseui.copy()
174         # A list of callbacks to shape the phase if no data were found.
175         # Callbacks are in the form: func(repo, roots) --> processed root.
176         # This list is to be filled by extensions during repo setup
177         self._phasedefaults = []
178         try:
179             self.ui.readconfig(self.join("hgrc"), self.root)
180             extensions.loadall(self.ui)
181         except IOError:
182             pass
183
184         if self.featuresetupfuncs:
185             self.supported = set(self._basesupported) # use private copy
186             extmods = set(m.__name__ for n, m
187                           in extensions.extensions(self.ui))
188             for setupfunc in self.featuresetupfuncs:
189                 if setupfunc.__module__ in extmods:
190                     setupfunc(self.ui, self.supported)
191         else:
192             self.supported = self._basesupported
193
194         if not self.vfs.isdir():
195             if create:
196                 if not self.wvfs.exists():
197                     self.wvfs.makedirs()
198                 self.vfs.makedir(notindexed=True)
199                 requirements = self._baserequirements(create)
200                 if self.ui.configbool('format', 'usestore', True):
201                     self.vfs.mkdir("store")
202                     requirements.append("store")
203                     if self.ui.configbool('format', 'usefncache', True):
204                         requirements.append("fncache")
205                         if self.ui.configbool('format', 'dotencode', True):
206                             requirements.append('dotencode')
207                     # create an invalid changelog
208                     self.vfs.append(
209                         "00changelog.i",
210                         '\0\0\0\2' # represents revlogv2
211                         ' dummy changelog to prevent using the old repo layout'
212                     )
213                 if self.ui.configbool('format', 'generaldelta', False):
214                     requirements.append("generaldelta")
215                 requirements = set(requirements)
216             else:
217                 raise error.RepoError(_("repository %s not found") % path)
218         elif create:
219             raise error.RepoError(_("repository %s already exists") % path)
220         else:
221             try:
222                 requirements = scmutil.readrequires(self.vfs, self.supported)
223             except IOError, inst:
224                 if inst.errno != errno.ENOENT:
225                     raise
226                 requirements = set()
227
228         self.sharedpath = self.path
229         try:
230             vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
231                               realpath=True)
232             s = vfs.base
233             if not vfs.exists():
234                 raise error.RepoError(
235                     _('.hg/sharedpath points to nonexistent directory %s') % s)
236             self.sharedpath = s
237         except IOError, inst:
238             if inst.errno != errno.ENOENT:
239                 raise
240
241         self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
242         self.spath = self.store.path
243         self.svfs = self.store.vfs
244         self.sopener = self.svfs
245         self.sjoin = self.store.join
246         self.vfs.createmode = self.store.createmode
247         self._applyrequirements(requirements)
248         if create:
249             self._writerequirements()
250
251
252         self._branchcaches = {}
253         self.filterpats = {}
254         self._datafilters = {}
255         self._transref = self._lockref = self._wlockref = None
256
257         # A cache for various files under .hg/ that tracks file changes,
258         # (used by the filecache decorator)
259         #
260         # Maps a property name to its util.filecacheentry
261         self._filecache = {}
262
263         # holds sets of revisions to be filtered
264         # should be cleared when something might have changed the filter value:
265         # - new changesets,
266         # - phase change,
267         # - new obsolescence marker,
268         # - working directory parent change,
269         # - bookmark changes
270         self.filteredrevcache = {}
271
272     def close(self):
273         pass
274
275     def _restrictcapabilities(self, caps):
276         return caps
277
278     def _applyrequirements(self, requirements):
279         self.requirements = requirements
280         self.sopener.options = dict((r, 1) for r in requirements
281                                     if r in self.openerreqs)
282
283     def _writerequirements(self):
284         reqfile = self.opener("requires", "w")
285         for r in sorted(self.requirements):
286             reqfile.write("%s\n" % r)
287         reqfile.close()
288
289     def _checknested(self, path):
290         """Determine if path is a legal nested repository."""
291         if not path.startswith(self.root):
292             return False
293         subpath = path[len(self.root) + 1:]
294         normsubpath = util.pconvert(subpath)
295
296         # XXX: Checking against the current working copy is wrong in
297         # the sense that it can reject things like
298         #
299         #   $ hg cat -r 10 sub/x.txt
300         #
301         # if sub/ is no longer a subrepository in the working copy
302         # parent revision.
303         #
304         # However, it can of course also allow things that would have
305         # been rejected before, such as the above cat command if sub/
306         # is a subrepository now, but was a normal directory before.
307         # The old path auditor would have rejected by mistake since it
308         # panics when it sees sub/.hg/.
309         #
310         # All in all, checking against the working copy seems sensible
311         # since we want to prevent access to nested repositories on
312         # the filesystem *now*.
313         ctx = self[None]
314         parts = util.splitpath(subpath)
315         while parts:
316             prefix = '/'.join(parts)
317             if prefix in ctx.substate:
318                 if prefix == normsubpath:
319                     return True
320                 else:
321                     sub = ctx.sub(prefix)
322                     return sub.checknested(subpath[len(prefix) + 1:])
323             else:
324                 parts.pop()
325         return False
326
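The loop at the end of _checknested() strips path components from the right until it hits a known subrepo prefix. The same walk in isolation (a sketch using a plain set where the real code consults ctx.substate):

def subrepo_prefix(subpath, substate):
    parts = subpath.split('/')
    while parts:
        prefix = '/'.join(parts)
        if prefix in substate:
            return prefix
        parts.pop()          # try the next shorter prefix
    return None

print(subrepo_prefix('sub/dir/x.txt', set(['sub'])))   # sub
print(subrepo_prefix('other/x.txt', set(['sub'])))     # None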
327     def peer(self):
328         return localpeer(self) # not cached to avoid reference cycle
329
330     def unfiltered(self):
331         """Return unfiltered version of the repository
332
333         Intended to be overwritten by filtered repo."""
334         return self
335
336     def filtered(self, name):
337         """Return a filtered version of a repository"""
338         # build a new class with the mixin and the current class
339         # (possibly subclass of the repo)
340         class proxycls(repoview.repoview, self.unfiltered().__class__):
341             pass
342         return proxycls(self, name)
343
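filtered() builds a one-off class so the returned view is simultaneously a repoview and an instance of the repo's own (possibly extension-subclassed) class, which keeps isinstance() checks working. The trick reduced to its bones, with toy classes invented for this sketch:

class repoview(object):
    def __init__(self, repo, name):
        self._unfiltered, self.filtername = repo, name

class myrepo(object):
    filtername = None

def filtered(repo, name):
    # mix the view class in front of the repo's concrete class
    class proxycls(repoview, repo.__class__):
        pass
    return proxycls(repo, name)

view = filtered(myrepo(), 'served')
print('%s %s' % (isinstance(view, myrepo), view.filtername))  # True served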
344     @repofilecache('bookmarks')
345     def _bookmarks(self):
346         return bookmarks.bmstore(self)
347
348     @repofilecache('bookmarks.current')
349     def _bookmarkcurrent(self):
350         return bookmarks.readcurrent(self)
351
352     def bookmarkheads(self, bookmark):
353         name = bookmark.split('@', 1)[0]
354         heads = []
355         for mark, n in self._bookmarks.iteritems():
356             if mark.split('@', 1)[0] == name:
357                 heads.append(n)
358         return heads
359
360     @storecache('phaseroots')
361     def _phasecache(self):
362         return phases.phasecache(self, self._phasedefaults)
363
364     @storecache('obsstore')
365     def obsstore(self):
366         store = obsolete.obsstore(self.sopener)
367         if store and not obsolete._enabled:
368             # message is rare enough to not be translated
369             msg = 'obsolete feature not enabled but %i markers found!\n'
370             self.ui.warn(msg % len(list(store)))
371         return store
372
373     @storecache('00changelog.i')
374     def changelog(self):
375         c = changelog.changelog(self.sopener)
376         if 'HG_PENDING' in os.environ:
377             p = os.environ['HG_PENDING']
378             if p.startswith(self.root):
379                 c.readpending('00changelog.i.a')
380         return c
381
382     @storecache('00manifest.i')
383     def manifest(self):
384         return manifest.manifest(self.sopener)
385
386     @repofilecache('dirstate')
387     def dirstate(self):
388         warned = [0]
389         def validate(node):
390             try:
391                 self.changelog.rev(node)
392                 return node
393             except error.LookupError:
394                 if not warned[0]:
395                     warned[0] = True
396                     self.ui.warn(_("warning: ignoring unknown"
397                                    " working parent %s!\n") % short(node))
398                 return nullid
399
400         return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401
402     def __getitem__(self, changeid):
403         if changeid is None:
404             return context.workingctx(self)
405         return context.changectx(self, changeid)
406
407     def __contains__(self, changeid):
408         try:
409             return bool(self.lookup(changeid))
410         except error.RepoLookupError:
411             return False
412
413     def __nonzero__(self):
414         return True
415
416     def __len__(self):
417         return len(self.changelog)
418
419     def __iter__(self):
420         return iter(self.changelog)
421
422     def revs(self, expr, *args):
423         '''Return a list of revisions matching the given revset'''
424         expr = revset.formatspec(expr, *args)
425         m = revset.match(None, expr)
426         return [r for r in m(self, list(self))]
427
428     def set(self, expr, *args):
429         '''
430         Yield a context for each matching revision, after doing arg
431         replacement via revset.formatspec
432         '''
433         for r in self.revs(expr, *args):
434             yield self[r]
435
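revs() hands back plain revision numbers while set() wraps each one in a change context; both run the expression through revset.formatspec first. A toy model of that split (a sketch in which predicates stand in for parsed revset expressions; none of these classes are Mercurial's):

class fakectx(object):
    def __init__(self, rev):
        self._rev = rev
    def rev(self):
        return self._rev

class fakerepo(object):
    def __init__(self, tiprev):
        self._n = tiprev + 1
    def revs(self, predicate):
        return [r for r in range(self._n) if predicate(r)]
    def set(self, predicate):
        # same selection, but yield context objects instead of ints
        for r in self.revs(predicate):
            yield fakectx(r)

repo = fakerepo(4)
print(repo.revs(lambda r: r % 2 == 0))                 # [0, 2, 4]
print([c.rev() for c in repo.set(lambda r: r > 2)])    # [3, 4]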
436     def url(self):
437         return 'file:' + self.root
438
439     def hook(self, name, throw=False, **args):
440         return hook.hook(self.ui, self, name, throw, **args)
441
442     @unfilteredmethod
443     def _tag(self, names, node, message, local, user, date, extra={}):
444         if isinstance(names, str):
445             names = (names,)
446
447         branches = self.branchmap()
448         for name in names:
449             self.hook('pretag', throw=True, node=hex(node), tag=name,
450                       local=local)
451             if name in branches:
452                 self.ui.warn(_("warning: tag %s conflicts with existing"
453                                " branch name\n") % name)
454
455         def writetags(fp, names, munge, prevtags):
456             fp.seek(0, 2)
457             if prevtags and prevtags[-1] != '\n':
458                 fp.write('\n')
459             for name in names:
460                 m = munge and munge(name) or name
461                 if (self._tagscache.tagtypes and
462                     name in self._tagscache.tagtypes):
463                     old = self.tags().get(name, nullid)
464                     fp.write('%s %s\n' % (hex(old), m))
465                 fp.write('%s %s\n' % (hex(node), m))
466             fp.close()
467
468         prevtags = ''
469         if local:
470             try:
471                 fp = self.opener('localtags', 'r+')
472             except IOError:
473                 fp = self.opener('localtags', 'a')
474             else:
475                 prevtags = fp.read()
476
477             # local tags are stored in the current charset
478             writetags(fp, names, None, prevtags)
479             for name in names:
480                 self.hook('tag', node=hex(node), tag=name, local=local)
481             return
482
483         try:
484             fp = self.wfile('.hgtags', 'rb+')
485         except IOError, e:
486             if e.errno != errno.ENOENT:
487                 raise
488             fp = self.wfile('.hgtags', 'ab')
489         else:
490             prevtags = fp.read()
491
492         # committed tags are stored in UTF-8
493         writetags(fp, names, encoding.fromlocal, prevtags)
494
495         fp.close()
496
497         self.invalidatecaches()
498
499         if '.hgtags' not in self.dirstate:
500             self[None].add(['.hgtags'])
501
502         m = matchmod.exact(self.root, '', ['.hgtags'])
503         tagnode = self.commit(message, user, date, extra=extra, match=m)
504
505         for name in names:
506             self.hook('tag', node=hex(node), tag=name, local=local)
507
508         return tagnode
509
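writetags() appends one "<hex node> <name>" line per tag; when a tag already exists, its previous node is written first, so the binding history is preserved and the last line wins. The record shape, sketched with made-up 40-character ids:

old, new = '0' * 40, '1' * 40
print('%s %s' % (old, 'v1.0'))   # prior binding, recorded on a re-tag
print('%s %s' % (new, 'v1.0'))   # the new binding; the last entry wins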
510     def tag(self, names, node, message, local, user, date):
511         '''tag a revision with one or more symbolic names.
512
513         names is a list of strings or, when adding a single tag, names may be a
514         string.
515
516         if local is True, the tags are stored in a per-repository file.
517         otherwise, they are stored in the .hgtags file, and a new
518         changeset is committed with the change.
519
520         keyword arguments:
521
522         local: whether to store tags in non-version-controlled file
523         (default False)
524
525         message: commit message to use if committing
526
527         user: name of user to use if committing
528
529         date: date tuple to use if committing'''
530
531         if not local:
532             for x in self.status()[:5]:
533                 if '.hgtags' in x:
534                     raise util.Abort(_('working copy of .hgtags is changed '
535                                        '(please commit .hgtags manually)'))
536
537         self.tags() # instantiate the cache
538         self._tag(names, node, message, local, user, date)
539
540     @filteredpropertycache
541     def _tagscache(self):
542         '''Returns a tagscache object that contains various tags related
543         caches.'''
544
545         # This simplifies its cache management by having one decorated
546         # function (this one) and the rest simply fetch things from it.
547         class tagscache(object):
548             def __init__(self):
549                 # These two define the set of tags for this repository. tags
550                 # maps tag name to node; tagtypes maps tag name to 'global' or
551                 # 'local'. (Global tags are defined by .hgtags across all
552                 # heads, and local tags are defined in .hg/localtags.)
553                 # They constitute the in-memory cache of tags.
554                 self.tags = self.tagtypes = None
555
556                 self.nodetagscache = self.tagslist = None
557
558         cache = tagscache()
559         cache.tags, cache.tagtypes = self._findtags()
560
561         return cache
562
563     def tags(self):
564         '''return a mapping of tag to node'''
565         t = {}
566         if self.changelog.filteredrevs:
567             tags, tt = self._findtags()
568         else:
569             tags = self._tagscache.tags
570         for k, v in tags.iteritems():
571             try:
572                 # ignore tags to unknown nodes
573                 self.changelog.rev(v)
574                 t[k] = v
575             except (error.LookupError, ValueError):
576                 pass
577         return t
578
579     def _findtags(self):
580         '''Do the hard work of finding tags.  Return a pair of dicts
581         (tags, tagtypes) where tags maps tag name to node, and tagtypes
582         maps tag name to a string like \'global\' or \'local\'.
583         Subclasses or extensions are free to add their own tags, but
584         should be aware that the returned dicts will be retained for the
585         duration of the localrepo object.'''
586
587         # XXX what tagtype should subclasses/extensions use?  Currently
588         # mq and bookmarks add tags, but do not set the tagtype at all.
589         # Should each extension invent its own tag type?  Should there
590         # be one tagtype for all such "virtual" tags?  Or is the status
591         # quo fine?
592
593         alltags = {}                    # map tag name to (node, hist)
594         tagtypes = {}
595
596         tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597         tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598
599         # Build the return dicts.  Have to re-encode tag names because
600         # the tags module always uses UTF-8 (in order not to lose info
601         # writing to the cache), but the rest of Mercurial wants them in
602         # local encoding.
603         tags = {}
604         for (name, (node, hist)) in alltags.iteritems():
605             if node != nullid:
606                 tags[encoding.tolocal(name)] = node
607         tags['tip'] = self.changelog.tip()
608         tagtypes = dict([(encoding.tolocal(name), value)
609                          for (name, value) in tagtypes.iteritems()])
610         return (tags, tagtypes)
611
612     def tagtype(self, tagname):
613         '''
614         return the type of the given tag. result can be:
615
616         'local'  : a local tag
617         'global' : a global tag
618         None     : tag does not exist
619         '''
620
621         return self._tagscache.tagtypes.get(tagname)
622
623     def tagslist(self):
624         '''return a list of tags ordered by revision'''
625         if not self._tagscache.tagslist:
626             l = []
627             for t, n in self.tags().iteritems():
628                 r = self.changelog.rev(n)
629                 l.append((r, t, n))
630             self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631
632         return self._tagscache.tagslist
633
634     def nodetags(self, node):
635         '''return the tags associated with a node'''
636         if not self._tagscache.nodetagscache:
637             nodetagscache = {}
638             for t, n in self._tagscache.tags.iteritems():
639                 nodetagscache.setdefault(n, []).append(t)
640             for tags in nodetagscache.itervalues():
641                 tags.sort()
642             self._tagscache.nodetagscache = nodetagscache
643         return self._tagscache.nodetagscache.get(node, [])
644
645     def nodebookmarks(self, node):
646         marks = []
647         for bookmark, n in self._bookmarks.iteritems():
648             if n == node:
649                 marks.append(bookmark)
650         return sorted(marks)
651
652     def branchmap(self):
653         '''returns a dictionary {branch: [branchheads]}'''
654         branchmap.updatecache(self)
655         return self._branchcaches[self.filtername]
656
657
658     def _branchtip(self, heads):
659         '''return the tipmost branch head in heads'''
660         tip = heads[-1]
661         for h in reversed(heads):
662             if not self[h].closesbranch():
663                 tip = h
664                 break
665         return tip
666
667     def branchtip(self, branch):
668         '''return the tip node for a given branch'''
669         if branch not in self.branchmap():
670             raise error.RepoLookupError(_("unknown branch '%s'") % branch)
671         return self._branchtip(self.branchmap()[branch])
672
673     def branchtags(self):
674         '''return a dict where branch names map to the tipmost head of
675         the branch, open heads come before closed'''
676         bt = {}
677         for bn, heads in self.branchmap().iteritems():
678             bt[bn] = self._branchtip(heads)
679         return bt
680
681     def lookup(self, key):
682         return self[key].node()
683
684     def lookupbranch(self, key, remote=None):
685         repo = remote or self
686         if key in repo.branchmap():
687             return key
688
689         repo = (remote and remote.local()) and remote or self
690         return repo[key].branch()
691
692     def known(self, nodes):
693         nm = self.changelog.nodemap
694         pc = self._phasecache
695         result = []
696         for n in nodes:
697             r = nm.get(n)
698             resp = not (r is None or pc.phase(self, r) >= phases.secret)
699             result.append(resp)
700         return result
701
702     def local(self):
703         return self
704
705     def cancopy(self):
706         return self.local() # so statichttprepo's override of local() works
707
708     def join(self, f):
709         return os.path.join(self.path, f)
710
711     def wjoin(self, f):
712         return os.path.join(self.root, f)
713
714     def file(self, f):
715         if f[0] == '/':
716             f = f[1:]
717         return filelog.filelog(self.sopener, f)
718
719     def changectx(self, changeid):
720         return self[changeid]
721
722     def parents(self, changeid=None):
723         '''get list of changectxs for parents of changeid'''
724         return self[changeid].parents()
725
726     def setparents(self, p1, p2=nullid):
727         copies = self.dirstate.setparents(p1, p2)
728         pctx = self[p1]
729         if copies:
730             # Adjust copy records: the dirstate cannot do it, as it
731             # requires access to the parents' manifests. Preserve them
732             # only for entries added to the first parent.
733             for f in copies:
734                 if f not in pctx and copies[f] in pctx:
735                     self.dirstate.copy(copies[f], f)
736         if p2 == nullid:
737             for f, s in sorted(self.dirstate.copies().items()):
738                 if f not in pctx and s not in pctx:
739                     self.dirstate.copy(None, f)
740
741     def filectx(self, path, changeid=None, fileid=None):
742         """changeid can be a changeset revision, node, or tag.
743         fileid can be a file revision or node."""
744         return context.filectx(self, path, changeid, fileid)
745
746     def getcwd(self):
747         return self.dirstate.getcwd()
748
749     def pathto(self, f, cwd=None):
750         return self.dirstate.pathto(f, cwd)
751
752     def wfile(self, f, mode='r'):
753         return self.wopener(f, mode)
754
755     def _link(self, f):
756         return self.wvfs.islink(f)
757
758     def _loadfilter(self, filter):
759         if filter not in self.filterpats:
760             l = []
761             for pat, cmd in self.ui.configitems(filter):
762                 if cmd == '!':
763                     continue
764                 mf = matchmod.match(self.root, '', [pat])
765                 fn = None
766                 params = cmd
767                 for name, filterfn in self._datafilters.iteritems():
768                     if cmd.startswith(name):
769                         fn = filterfn
770                         params = cmd[len(name):].lstrip()
771                         break
772                 if not fn:
773                     fn = lambda s, c, **kwargs: util.filter(s, c)
774                 # Wrap old filters not supporting keyword arguments
775                 if not inspect.getargspec(fn)[2]:
776                     oldfn = fn
777                     fn = lambda s, c, **kwargs: oldfn(s, c)
778                 l.append((mf, fn, params))
779             self.filterpats[filter] = l
780         return self.filterpats[filter]
781
782     def _filter(self, filterpats, filename, data):
783         for mf, fn, cmd in filterpats:
784             if mf(filename):
785                 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
786                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
787                 break
788
789         return data
790
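The shim at the end of _loadfilter() probes getargspec()[2] (the **kwargs slot) and wraps any filter that lacks it, so _filter() can always pass keyword arguments. The probe on its own (a sketch; inspect.getargspec is the Python 2-era API used here and has been removed from modern Python 3):

import inspect

def oldstyle(s, cmd):          # a filter that predates keyword arguments
    return s.upper()

fn = oldstyle
if not inspect.getargspec(fn)[2]:      # index 2: name of **kwargs, or None
    oldfn = fn
    fn = lambda s, c, **kwargs: oldfn(s, c)

print(fn('data', 'cmd', ui=None, repo=None))   # DATA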
791 @unfilteredpropertycache
791 @unfilteredpropertycache
792 def _encodefilterpats(self):
792 def _encodefilterpats(self):
793 return self._loadfilter('encode')
793 return self._loadfilter('encode')
794
794
795 @unfilteredpropertycache
795 @unfilteredpropertycache
796 def _decodefilterpats(self):
796 def _decodefilterpats(self):
797 return self._loadfilter('decode')
797 return self._loadfilter('decode')
798
798
799 def adddatafilter(self, name, filter):
799 def adddatafilter(self, name, filter):
800 self._datafilters[name] = filter
800 self._datafilters[name] = filter
801
801
    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc, report=None):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

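    # --- illustrative sketch (editorial addition, not part of this file) ---
    # transaction() either nests inside a running transaction or opens a new
    # one backed by the journal files below. The usual calling pattern, under
    # the store lock, is close-on-success / release-in-finally:
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('mytask')  # 'mytask' is a made-up desc
    #         try:
    #             ...  # write revlog data here
    #             tr.close()      # commit the transaction
    #         finally:
    #             tr.release()    # rolls back automatically if not closed
    #     finally:
    #         lock.release()
    # --- end sketch ---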
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]

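    # --- illustrative sketch (editorial addition, not part of this file) ---
    # undofiles() relies on the module-level undoname() helper (defined
    # elsewhere in this file) to map each journal file to its post-commit
    # backup, e.g. 'journal.dirstate' -> 'undo.dirstate'. Roughly:
    #
    #     def undoname(fn):
    #         base, name = os.path.split(fn)
    #         assert name.startswith('journal')
    #         return os.path.join(base, name.replace('journal', 'undo', 1))
    # --- end sketch ---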
    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

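    # --- illustrative sketch (editorial addition, not part of this file) ---
    # journal.desc records the changelog length and the transaction
    # description; after a successful transaction it is renamed to undo.desc
    # and parsed back by _rollback() below. For a repository of 42 revisions
    # and a 'commit' transaction the file would contain:
    #
    #     42
    #     commit
    # --- end sketch ---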
    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()

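    # --- illustrative sketch (editorial addition, not part of this file) ---
    # invalidate() works because filecache-backed properties store their
    # value in the instance __dict__; deleting the attribute forces the
    # descriptor to re-read the backing file on the next access. In
    # miniature (names here are hypothetical, not the real filecache
    # descriptor):
    #
    #     class cached(object):
    #         def __init__(self, func):
    #             self.func, self.name = func, func.__name__
    #         def __get__(self, obj, type=None):
    #             val = self.func(obj)           # e.g. parse .hg/bookmarks
    #             obj.__dict__[self.name] = val  # cached until invalidated
    #             return val
    #
    #     delattr(repo, 'somecache')  # next access recomputes
    # --- end sketch ---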
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

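    # --- illustrative sketch (editorial addition, not part of this file) ---
    # When both locks are needed, wlock must be acquired before lock (the
    # ordering used by rollback() above); releasing in reverse order avoids
    # deadlocks between concurrent hg processes:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()   # working copy / dirstate
    #         lock = repo.lock()     # .hg/store
    #         ...                    # mutate store and working copy
    #     finally:
    #         release(lock, wlock)   # release() tolerates None entries
    # --- end sketch ---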
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

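    # --- illustrative sketch (editorial addition, not part of this file) ---
    # The meta dict built above is stored in the filelog entry itself, so a
    # renamed file's revision carries its source as metadata, conceptually:
    #
    #     meta = {'copy': 'foo', 'copyrev': 'a1b2...'}   # hypothetical values
    #     flog.add(text, meta, tr, linkrev, nullid, newfparent)
    #
    # with fparent1 set to nullid as the "look up the copy data" signal
    # described in the comment block above.
    # --- end sketch ---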
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

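    # --- illustrative sketch (editorial addition, not part of this file) ---
    # From an extension, a scripted commit restricted to some files follows
    # the same path as 'hg commit' (the file list here is hypothetical):
    #
    #     m = matchmod.match(repo.root, '', ['src/a.py', 'src/b.py'])
    #     node = repo.commit(text='automated update', user='bot', match=m)
    #     if node is None:
    #         ui.status('nothing changed\n')
    # --- end sketch ---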
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

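    # --- illustrative sketch (editorial addition, not part of this file) ---
    # The pretxncommit hook above fires while the transaction is still open,
    # so a failing hook vetoes the commit and lets tr.release() roll it back.
    # A user-side hgrc counterpart might look like this (the hook name and
    # command are illustrative, not prescribed by this code):
    #
    #     [hooks]
    #     pretxncommit.whitespace = hg export tip | (! egrep -q '^\+.* $')
    # --- end sketch ---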
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

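    # --- illustrative sketch (editorial addition, not part of this file) ---
    # Callers unpack the 7-tuple positionally; the ignored, clean and
    # unknown lists are only populated when the corresponding flags are
    # passed:
    #
    #     st = repo.status(ignored=True, clean=True, unknown=True)
    #     modified, added, removed, deleted, unknown, ignored, clean = st
    #     for f in modified:
    #         print 'M %s' % f   # Python 2 era, matching this codebase
    # --- end sketch ---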
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

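    # --- illustrative sketch (editorial addition, not part of this file) ---
    # between() samples the first-parent chain at exponentially growing
    # distances from 'top': f doubles each time a node is kept, so the
    # recorded positions are i = 1, 2, 4, 8, ... This keeps the answer
    # logarithmic in the chain length for the old discovery protocol; e.g.
    # walking 10 ancestors records only the nodes at offsets 1, 2, 4 and 8.
    # --- end sketch ---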
1666 def pull(self, remote, heads=None, force=False):
1667 if remote.local():
1668 missing = set(remote.requirements) - self.supported
1669 if missing:
1670 msg = _("required features are not"
1671 " supported in the destination:"
1672 " %s") % (', '.join(sorted(missing)))
1673 raise util.Abort(msg)
1674
1675 # don't open a transaction for nothing, or you break future useful
1676 # rollback calls
1677 tr = None
1678 trname = 'pull\n' + util.hidepassword(remote.url())
1679 lock = self.lock()
1680 try:
1681 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1682 force=force)
1683 common, fetch, rheads = tmp
1684 if not fetch:
1685 self.ui.status(_("no changes found\n"))
1686 added = []
1687 result = 0
1688 else:
1689 tr = self.transaction(trname)
1690 if heads is None and list(common) == [nullid]:
1691 self.ui.status(_("requesting all changes\n"))
1692 elif heads is None and remote.capable('changegroupsubset'):
1693 # issue1320, avoid a race if remote changed after discovery
1694 heads = rheads
1695
1696 if remote.capable('getbundle'):
1697 # TODO: get bundlecaps from remote
1698 cg = remote.getbundle('pull', common=common,
1699 heads=heads or rheads)
1700 elif heads is None:
1701 cg = remote.changegroup(fetch, 'pull')
1702 elif not remote.capable('changegroupsubset'):
1703 raise util.Abort(_("partial pull cannot be done because "
1704 "the other repository doesn't support "
1705 "changegroupsubset."))
1706 else:
1707 cg = remote.changegroupsubset(fetch, heads, 'pull')
1708 # we use the unfiltered changelog here because hidden revisions
1709 # must be taken into account for phase synchronization. They
1710 # may become public and visible again.
1711 cl = self.unfiltered().changelog
1712 clstart = len(cl)
1713 result = self.addchangegroup(cg, 'pull', remote.url())
1714 clend = len(cl)
1715 added = [cl.node(r) for r in xrange(clstart, clend)]
1716
1717 # compute target subset
1718 if heads is None:
1719 # We pulled everything possible
1720 # sync on everything common
1721 subset = common + added
1722 else:
1723 # We pulled a specific subset
1724 # sync on this subset
1725 subset = heads
1726
1727 # Get phase data from the remote
1728 remotephases = remote.listkeys('phases')
1729 publishing = bool(remotephases.get('publishing', False))
1730 if remotephases and not publishing:
1731 # remote is new and non-publishing
1732 pheads, _dr = phases.analyzeremotephases(self, subset,
1733 remotephases)
1734 phases.advanceboundary(self, phases.public, pheads)
1735 phases.advanceboundary(self, phases.draft, subset)
1736 else:
1737 # Remote is old or publishing; all common changesets
1738 # should be seen as public
1739 phases.advanceboundary(self, phases.public, subset)
1740
1741 def gettransaction():
1742 if tr is None:
1743 return self.transaction(trname)
1744 return tr
1745
1746 obstr = obsolete.syncpull(self, remote, gettransaction)
1747 if obstr is not None:
1748 tr = obstr
1749
1750 if tr is not None:
1751 tr.close()
1752 finally:
1753 if tr is not None:
1754 tr.release()
1755 lock.release()
1756
1757 return result
1758
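Both branches above funnel into `phases.advanceboundary`, which only ever moves changesets toward a lower (more public) phase and never demotes them. A toy model of that invariant, with phases as small integers keyed by node; the real function also walks ancestors through the changelog, which this sketch omits:

    public, draft, secret = 0, 1, 2

    def advanceboundary(phasemap, targetphase, nodes):
        # lower the phase of each node, but never raise it
        for n in nodes:
            if phasemap.get(n, draft) > targetphase:
                phasemap[n] = targetphase

    pm = {'a': secret, 'b': draft, 'c': public}
    advanceboundary(pm, draft, ['a', 'b', 'c'])
    assert pm == {'a': draft, 'b': draft, 'c': public}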
1759 def checkpush(self, force, revs):
1760 """Extensions can override this function if additional checks have
1761 to be performed before pushing, or call it if they override the
1762 push command.
1763 """
1764 pass
1765
1766 def push(self, remote, force=False, revs=None, newbranch=False):
1767 '''Push outgoing changesets (limited by revs) from the current
1768 repository to remote. Return an integer:
1769 - None means nothing to push
1770 - 0 means HTTP error
1771 - 1 means we pushed and remote head count is unchanged *or*
1772 we have outgoing changesets but refused to push
1773 - other values as described by addchangegroup()
1774 '''
1775 if remote.local():
1776 missing = set(self.requirements) - remote.local().supported
1777 if missing:
1778 msg = _("required features are not"
1779 " supported in the destination:"
1780 " %s") % (', '.join(sorted(missing)))
1781 raise util.Abort(msg)
1782
1783 # there are two ways to push to remote repo:
1784 #
1785 # addchangegroup assumes local user can lock remote
1786 # repo (local filesystem, old ssh servers).
1787 #
1788 # unbundle assumes local user cannot lock remote repo (new ssh
1789 # servers, http servers).
1790
1791 if not remote.canpush():
1792 raise util.Abort(_("destination does not support push"))
1793 unfi = self.unfiltered()
1794 def localphasemove(nodes, phase=phases.public):
1795 """move <nodes> to <phase> in the local source repo"""
1796 if locallock is not None:
1797 phases.advanceboundary(self, phase, nodes)
1798 else:
1799 # repo is not locked, do not change any phases!
1800 # Inform the user that phases should have been moved when
1801 # applicable.
1802 actualmoves = [n for n in nodes if phase < self[n].phase()]
1803 phasestr = phases.phasenames[phase]
1804 if actualmoves:
1805 self.ui.status(_('cannot lock source repo, skipping local'
1806 ' %s phase update\n') % phasestr)
1807 # get local lock as we might write phase data
1808 locallock = None
1809 try:
1810 locallock = self.lock()
1811 except IOError, err:
1812 if err.errno != errno.EACCES:
1813 raise
1814 # source repo cannot be locked.
1815 # We do not abort the push, but just disable the local phase
1816 # synchronisation.
1817 msg = 'cannot lock source repository: %s\n' % err
1818 self.ui.debug(msg)
1819 try:
1820 self.checkpush(force, revs)
1821 lock = None
1822 unbundle = remote.capable('unbundle')
1823 if not unbundle:
1824 lock = remote.lock()
1825 try:
1826 # discovery
1827 fci = discovery.findcommonincoming
1828 commoninc = fci(unfi, remote, force=force)
1829 common, inc, remoteheads = commoninc
1830 fco = discovery.findcommonoutgoing
1831 outgoing = fco(unfi, remote, onlyheads=revs,
1832 commoninc=commoninc, force=force)
1833
1834
1835 if not outgoing.missing:
1836 # nothing to push
1837 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1838 ret = None
1839 else:
1840 # something to push
1841 if not force:
1842 # if self.obsstore is false --> no obsolete markers,
1843 # so save the iteration
1844 if unfi.obsstore:
1845 # these messages are here for 80-char limit reasons
1846 mso = _("push includes obsolete changeset: %s!")
1847 mst = "push includes %s changeset: %s!"
1848 # plain versions for i18n tool to detect them
1849 _("push includes unstable changeset: %s!")
1850 _("push includes bumped changeset: %s!")
1851 _("push includes divergent changeset: %s!")
1852 # If we are pushing and there is at least one
1853 # obsolete or unstable changeset in missing, at
1854 # least one of the missing heads will be obsolete
1855 # or unstable. So checking heads only is ok
1856 for node in outgoing.missingheads:
1857 ctx = unfi[node]
1858 if ctx.obsolete():
1859 raise util.Abort(mso % ctx)
1860 elif ctx.troubled():
1861 raise util.Abort(_(mst)
1862 % (ctx.troubles()[0],
1863 ctx))
1864 discovery.checkheads(unfi, remote, outgoing,
1865 remoteheads, newbranch,
1866 bool(inc))
1867
1868 # TODO: get bundlecaps from remote
1869 bundlecaps = None
1870 # create a changegroup from local
1871 if revs is None and not outgoing.excluded:
1872 # push everything,
1873 # use the fast path, no race possible on push
1874 bundler = changegroup.bundle10(self, bundlecaps)
1875 cg = self._changegroupsubset(outgoing,
1876 bundler,
1877 'push',
1878 fastpath=True)
1879 else:
1880 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1881
1882 # apply changegroup to remote
1883 if unbundle:
1884 # The local repo finds heads on the server and finds out
1885 # what revs it must push. Once revs are transferred, if
1886 # the server finds it has different heads (someone else
1887 # won the commit/push race), it aborts.
1888 if force:
1889 remoteheads = ['force']
1890 # ssh: return remote's addchangegroup()
1891 # http: return remote's addchangegroup() or 0 for error
1892 ret = remote.unbundle(cg, remoteheads, 'push')
1893 else:
1894 # we return an integer indicating remote head count
1895 # change
1896 ret = remote.addchangegroup(cg, 'push', self.url())
1897
1898 if ret:
1899 # push succeeded, synchronize the target of the push
1900 cheads = outgoing.missingheads
1901 elif revs is None:
1902 # All-out push failed. Synchronize all common
1903 cheads = outgoing.commonheads
1904 else:
1905 # I want cheads = heads(::missingheads and ::commonheads)
1906 # (missingheads is revs with secret changeset filtered out)
1907 #
1908 # This can be expressed as:
1909 # cheads = ( (missingheads and ::commonheads)
1910 # + (commonheads and ::missingheads)
1911 # )
1912 #
1913 # while trying to push we already computed the following:
1914 # common = (::commonheads)
1915 # missing = ((commonheads::missingheads) - commonheads)
1916 #
1917 # We can pick:
1918 # * missingheads part of common (::commonheads)
1919 common = set(outgoing.common)
1920 cheads = [node for node in revs if node in common]
1921 # and
1922 # * commonheads parents on missing
1923 revset = unfi.set('%ln and parents(roots(%ln))',
1924 outgoing.commonheads,
1925 outgoing.missing)
1926 cheads.extend(c.node() for c in revset)
1927 # even when we don't push, exchanging phase data is useful
1928 remotephases = remote.listkeys('phases')
1929 if (self.ui.configbool('ui', '_usedassubrepo', False)
1930 and remotephases # server supports phases
1931 and ret is None # nothing was pushed
1932 and remotephases.get('publishing', False)):
1933 # When:
1934 # - this is a subrepo push
1935 # - and the remote supports phases
1936 # - and no changeset was pushed
1937 # - and the remote is publishing
1938 # We may be in issue 3871 case!
1939 # We drop the possible phase synchronisation done by
1940 # courtesy, to publish changesets possibly locally draft
1941 # on the remote.
1942 remotephases = {'publishing': 'True'}
1943 if not remotephases: # old server or public only repo
1944 localphasemove(cheads)
1945 # don't push any phase data as there is nothing to push
1946 else:
1947 ana = phases.analyzeremotephases(self, cheads, remotephases)
1948 pheads, droots = ana
1949 ### Apply remote phase on local
1950 if remotephases.get('publishing', False):
1951 localphasemove(cheads)
1952 else: # publish = False
1953 localphasemove(pheads)
1954 localphasemove(cheads, phases.draft)
1955 ### Apply local phase on remote
1956
1957 # Get the list of all revs draft on the remote but public here.
1958 # XXX Beware that the revset breaks if droots are not strictly
1959 # XXX roots; we may want to ensure they are, but that is costly
1960 outdated = unfi.set('heads((%ln::%ln) and public())',
1961 droots, cheads)
1962 for newremotehead in outdated:
1963 r = remote.pushkey('phases',
1964 newremotehead.hex(),
1965 str(phases.draft),
1966 str(phases.public))
1967 if not r:
1968 self.ui.warn(_('updating %s to public failed!\n')
1969 % newremotehead)
1970 self.ui.debug('try to push obsolete markers to remote\n')
1971 obsolete.syncpush(self, remote)
1972 finally:
1973 if lock is not None:
1974 lock.release()
1975 finally:
1976 if locallock is not None:
1977 locallock.release()
1978
1979 bookmarks.updateremote(self.ui, unfi, remote, revs)
1980 return ret
1981
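For callers, the return contract documented in the `push` docstring above can be folded into a small interpreter. This helper is illustrative only and not part of the module:

    def describepush(ret):
        # map push()'s return value to a human-readable outcome
        if ret is None:
            return 'nothing to push'
        if ret == 0:
            return 'HTTP error'
        if ret == 1:
            return 'pushed, remote head count unchanged (or push refused)'
        if ret > 1:
            return 'pushed, %d new remote heads' % (ret - 1)
        return 'pushed, %d remote heads removed' % (-ret - 1)

    assert describepush(None) == 'nothing to push'
    assert describepush(3) == 'pushed, 2 new remote heads'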
1982 def changegroupinfo(self, nodes, source):
1983 if self.ui.verbose or source == 'bundle':
1984 self.ui.status(_("%d changesets found\n") % len(nodes))
1985 if self.ui.debugflag:
1986 self.ui.debug("list of changesets:\n")
1987 for node in nodes:
1988 self.ui.debug("%s\n" % hex(node))
1989
1990 def changegroupsubset(self, bases, heads, source):
1991 """Compute a changegroup consisting of all the nodes that are
1992 descendants of any of the bases and ancestors of any of the heads.
1993 Return a chunkbuffer object whose read() method will return
1994 successive changegroup chunks.
1995
1996 It is fairly complex as determining which filenodes and which
1997 manifest nodes need to be included for the changeset to be complete
1998 is non-trivial.
1999
2000 Another wrinkle is doing the reverse, figuring out which changeset in
2001 the changegroup a particular filenode or manifestnode belongs to.
2002 """
2003 cl = self.changelog
2004 if not bases:
2005 bases = [nullid]
2006 # TODO: remove call to nodesbetween.
2007 csets, bases, heads = cl.nodesbetween(bases, heads)
2008 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
2009 outgoing = discovery.outgoing(cl, bases, heads)
2010 bundler = changegroup.bundle10(self)
2011 return self._changegroupsubset(outgoing, bundler, source)
2012
2013 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2014 """Like getbundle, but taking a discovery.outgoing as an argument.
2015
2016 This is only implemented for local repos and reuses potentially
2017 precomputed sets in outgoing."""
2018 if not outgoing.missing:
2019 return None
2020 bundler = changegroup.bundle10(self, bundlecaps)
2021 return self._changegroupsubset(outgoing, bundler, source)
2022
2023 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2024 """Like changegroupsubset, but returns the set difference between the
2025 ancestors of heads and the ancestors of common.
2026
2027 If heads is None, use the local heads. If common is None, use [nullid].
2028
2029 The nodes in common might not all be known locally due to the way the
2030 current discovery protocol works.
2031 """
2032 cl = self.changelog
2033 if common:
2034 hasnode = cl.hasnode
2035 common = [n for n in common if hasnode(n)]
2036 else:
2037 common = [nullid]
2038 if not heads:
2039 heads = cl.heads()
2040 return self.getlocalbundle(source,
2041 discovery.outgoing(cl, common, heads),
2042 bundlecaps=bundlecaps)
2043
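Conceptually, `getbundle` returns `ancestors(heads) - ancestors(common)`. On a bare parent-pointer DAG that set difference looks like the following sketch, where `parents` maps each node to a list of its parents (a hypothetical structure, not hg's revlog):

    def ancestors(parents, nodes):
        # inclusive ancestor set via an iterative depth-first walk
        seen, stack = set(), list(nodes)
        while stack:
            n = stack.pop()
            if n not in seen:
                seen.add(n)
                stack.extend(parents.get(n, []))
        return seen

    dag = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['b']}
    missing = ancestors(dag, ['c', 'd']) - ancestors(dag, ['b'])
    assert missing == set(['c', 'd'])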
2044 @unfilteredmethod
2045 def _changegroupsubset(self, outgoing, bundler, source,
2046 fastpath=False):
2047 commonrevs = outgoing.common
2048 csets = outgoing.missing
2049 heads = outgoing.missingheads
2050 # We go through the fast path if we are told to, or if all
2051 # (unfiltered) heads have been requested (since we then know that
2052 # all linkrevs will be pulled by the client).
2053 heads.sort()
2054 fastpathlinkrev = fastpath or (
2055 self.filtername is None and heads == sorted(self.heads()))
2056
2057 self.hook('preoutgoing', throw=True, source=source)
2058 self.changegroupinfo(csets, source)
2059 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2060 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2061
2062 def changegroup(self, basenodes, source):
2063 # to avoid a race we use changegroupsubset() (issue1320)
2064 return self.changegroupsubset(basenodes, self.heads(), source)
2065
2066 @unfilteredmethod
2067 def addchangegroup(self, source, srctype, url, emptyok=False):
2068 """Add the changegroup returned by source.read() to this repo.
2069 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2070 the URL of the repo where this changegroup is coming from.
2071
2072 Return an integer summarizing the change to this repo:
2073 - nothing changed or no source: 0
2074 - more heads than before: 1+added heads (2..n)
2075 - fewer heads than before: -1-removed heads (-2..-n)
2076 - number of heads stays the same: 1
2077 """
2078 def csmap(x):
2079 self.ui.debug("add changeset %s\n" % short(x))
2080 return len(cl)
2081
2082 def revmap(x):
2083 return cl.rev(x)
2084
2085 if not source:
2086 return 0
2087
2088 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2089
2090 changesets = files = revisions = 0
2091 efiles = set()
2092
2093 # write changelog data to temp files so concurrent readers will not
2094 # see an inconsistent view
2095 cl = self.changelog
2096 cl.delayupdate()
2097 oldheads = cl.heads()
2098
2099 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2100 try:
2101 trp = weakref.proxy(tr)
2102 # pull off the changeset group
2103 self.ui.status(_("adding changesets\n"))
2104 clstart = len(cl)
2105 class prog(object):
2106 step = _('changesets')
2107 count = 1
2108 ui = self.ui
2109 total = None
2110 def __call__(self):
2111 self.ui.progress(self.step, self.count, unit=_('chunks'),
2112 total=self.total)
2113 self.count += 1
2114 pr = prog()
2115 source.callback = pr
2116
2117 source.changelogheader()
2118 srccontent = cl.addgroup(source, csmap, trp)
2119 if not (srccontent or emptyok):
2120 raise util.Abort(_("received changelog group is empty"))
2121 clend = len(cl)
2122 changesets = clend - clstart
2123 for c in xrange(clstart, clend):
2124 efiles.update(self[c].files())
2125 efiles = len(efiles)
2126 self.ui.progress(_('changesets'), None)
2127
2128 # pull off the manifest group
2129 self.ui.status(_("adding manifests\n"))
2130 pr.step = _('manifests')
2131 pr.count = 1
2132 pr.total = changesets # manifests <= changesets
2133 # no need to check for empty manifest group here:
2134 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2135 # no new manifest will be created and the manifest group will
2136 # be empty during the pull
2137 source.manifestheader()
2138 self.manifest.addgroup(source, revmap, trp)
2139 self.ui.progress(_('manifests'), None)
2140
2141 needfiles = {}
2142 if self.ui.configbool('server', 'validate', default=False):
2143 # validate incoming csets have their manifests
2144 for cset in xrange(clstart, clend):
2145 mfest = self.changelog.read(self.changelog.node(cset))[0]
2146 mfest = self.manifest.readdelta(mfest)
2147 # store file nodes we must see
2148 for f, n in mfest.iteritems():
2149 needfiles.setdefault(f, set()).add(n)
2150
2151 # process the files
2152 self.ui.status(_("adding file changes\n"))
2153 pr.step = _('files')
2154 pr.count = 1
2155 pr.total = efiles
2156 source.callback = None
2157
2158 newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
2159 pr, needfiles)
2160 revisions += newrevs
2161 files += newfiles
2162
2163 dh = 0
2164 if oldheads:
2165 heads = cl.heads()
2166 dh = len(heads) - len(oldheads)
2167 for h in heads:
2168 if h not in oldheads and self[h].closesbranch():
2169 dh -= 1
2170 htext = ""
2171 if dh:
2172 htext = _(" (%+d heads)") % dh
2173
2174 self.ui.status(_("added %d changesets"
2175 " with %d changes to %d files%s\n")
2176 % (changesets, revisions, files, htext))
2177 self.invalidatevolatilesets()
2178
2179 if changesets > 0:
2180 p = lambda: cl.writepending() and self.root or ""
2181 self.hook('pretxnchangegroup', throw=True,
2182 node=hex(cl.node(clstart)), source=srctype,
2183 url=url, pending=p)
2184
2185 added = [cl.node(r) for r in xrange(clstart, clend)]
2186 publishing = self.ui.configbool('phases', 'publish', True)
2187 if srctype == 'push':
2188 # Old servers cannot push the boundary themselves.
2189 # New servers won't push the boundary if the changeset
2190 # already existed locally as secret
2191 #
2192 # We should not use added here but the list of all changes
2193 # in the bundle
2194 if publishing:
2195 phases.advanceboundary(self, phases.public, srccontent)
2196 else:
2197 phases.advanceboundary(self, phases.draft, srccontent)
2198 phases.retractboundary(self, phases.draft, added)
2199 elif srctype != 'strip':
2200 # publishing only alters behavior during push
2201 #
2202 # strip should not touch boundary at all
2203 phases.retractboundary(self, phases.draft, added)
2204
2205 # make changelog see real files again
2206 cl.finalize(trp)
2207
2208 tr.close()
2209
2210 if changesets > 0:
2211 if srctype != 'strip':
2212 # During strip, the branchcache is invalid but the coming
2213 # call to `destroyed` will repair it.
2214 # In other cases we can safely update the cache on disk.
2215 branchmap.updatecache(self.filtered('served'))
2216 def runhooks():
2217 # These hooks run when the lock releases, not when the
2218 # transaction closes. So it's possible for the changelog
2219 # to have changed since we last saw it.
2220 if clstart >= len(self):
2221 return
2222
2223 # forcefully update the on-disk branch cache
2224 self.ui.debug("updating the branch cache\n")
2225 self.hook("changegroup", node=hex(cl.node(clstart)),
2226 source=srctype, url=url)
2227
2228 for n in added:
2229 self.hook("incoming", node=hex(n), source=srctype,
2230 url=url)
2231
2232 newheads = [h for h in self.heads() if h not in oldheads]
2233 self.ui.log("incoming",
2234 "%s incoming changes - new heads: %s\n",
2235 len(added),
2236 ', '.join([hex(c[:6]) for c in newheads]))
2237 self._afterlock(runhooks)
2238
2239 finally:
2240 tr.release()
2241 # never return 0 here:
2242 if dh < 0:
2243 return dh - 1
2244 else:
2245 return dh + 1
2246
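The tail of `addchangegroup` maps the head-count delta `dh` to a non-zero return code so that 0 stays reserved for "nothing changed". The same mapping as a pure function, ignoring the `if oldheads:` guard used above (illustrative names, not module code):

    def headcode(oldheads, newheads, closed):
        # closed: new heads that close their branch and should not count
        dh = len(newheads) - len(oldheads)
        dh -= len([h for h in newheads if h not in oldheads and h in closed])
        return dh - 1 if dh < 0 else dh + 1

    assert headcode(['a'], ['a', 'b'], set()) == 2   # one head added
    assert headcode(['a', 'b'], ['a'], set()) == -2  # one head removed
    assert headcode(['a'], ['b'], set()) == 1        # head count unchanged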
2247 def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
2248 revisions = 0
2249 files = 0
2250 while True:
2251 chunkdata = source.filelogheader()
2252 if not chunkdata:
2253 break
2254 f = chunkdata["filename"]
2255 self.ui.debug("adding %s revisions\n" % f)
2256 pr()
2257 fl = self.file(f)
2258 o = len(fl)
2259 if not fl.addgroup(source, revmap, trp):
2260 raise util.Abort(_("received file revlog group is empty"))
2261 revisions += len(fl) - o
2262 files += 1
2263 if f in needfiles:
2264 needs = needfiles[f]
2265 for new in xrange(o, len(fl)):
2266 n = fl.node(new)
2267 if n in needs:
2268 needs.remove(n)
2269 else:
2270 raise util.Abort(
2271 _("received spurious file revlog entry"))
2272 if not needs:
2273 del needfiles[f]
2274 self.ui.progress(_('files'), None)
2275
2276 for f, needs in needfiles.iteritems():
2277 fl = self.file(f)
2278 for n in needs:
2279 try:
2280 fl.rev(n)
2281 except error.LookupError:
2282 raise util.Abort(
2283 _('missing file data for %s:%s - run hg verify') %
2284 (f, hex(n)))
2285
2286 return revisions, files
2287
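The `needfiles` bookkeeping above is a plain dict of filename to the set of filenode ids still expected: received nodes are checked off, unexpected ones abort, and leftovers mean missing data. A compact standalone model of that accounting (not the module's code):

    def checkoff(needfiles, f, received):
        # mark received nodes for file f; unexpected nodes are an error
        needs = needfiles.get(f)
        if needs is None:
            return
        for n in received:
            if n not in needs:
                raise ValueError('spurious filelog entry for %s' % f)
            needs.remove(n)
        if not needs:
            del needfiles[f]

    need = {'a.txt': set([1, 2])}
    checkoff(need, 'a.txt', [1, 2])
    assert 'a.txt' not in need  # fully satisfied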
2288 def stream_in(self, remote, requirements):
2289 lock = self.lock()
2290 try:
2291 # Save remote branchmap. We will use it later
2292 # to speed up branchcache creation
2293 rbranchmap = None
2294 if remote.capable("branchmap"):
2295 rbranchmap = remote.branchmap()
2296
2297 fp = remote.stream_out()
2298 l = fp.readline()
2299 try:
2300 resp = int(l)
2301 except ValueError:
2302 raise error.ResponseError(
2303 _('unexpected response from remote server:'), l)
2304 if resp == 1:
2305 raise util.Abort(_('operation forbidden by server'))
2306 elif resp == 2:
2307 raise util.Abort(_('locking the remote repository failed'))
2308 elif resp != 0:
2309 raise util.Abort(_('the server sent an unknown error code'))
2310 self.ui.status(_('streaming all changes\n'))
2311 l = fp.readline()
2312 try:
2313 total_files, total_bytes = map(int, l.split(' ', 1))
2314 except (ValueError, TypeError):
2315 raise error.ResponseError(
2316 _('unexpected response from remote server:'), l)
2317 self.ui.status(_('%d files to transfer, %s of data\n') %
2318 (total_files, util.bytecount(total_bytes)))
2319 handled_bytes = 0
2320 self.ui.progress(_('clone'), 0, total=total_bytes)
2321 start = time.time()
2322 for i in xrange(total_files):
2323 # XXX doesn't support '\n' or '\r' in filenames
2324 l = fp.readline()
2325 try:
2326 name, size = l.split('\0', 1)
2327 size = int(size)
2328 except (ValueError, TypeError):
2329 raise error.ResponseError(
2330 _('unexpected response from remote server:'), l)
2331 if self.ui.debugflag:
2332 self.ui.debug('adding %s (%s)\n' %
2333 (name, util.bytecount(size)))
2334 # for backwards compat, name was partially encoded
2335 ofp = self.sopener(store.decodedir(name), 'w')
2336 for chunk in util.filechunkiter(fp, limit=size):
2337 handled_bytes += len(chunk)
2338 self.ui.progress(_('clone'), handled_bytes,
2339 total=total_bytes)
2340 ofp.write(chunk)
2341 ofp.close()
2342 elapsed = time.time() - start
2343 if elapsed <= 0:
2344 elapsed = 0.001
2345 self.ui.progress(_('clone'), None)
2346 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2347 (util.bytecount(total_bytes), elapsed,
2348 util.bytecount(total_bytes / elapsed)))
2349
2350 # new requirements = old non-format requirements +
2351 # new format-related requirements from the
2352 # streamed-in repository
2353 requirements.update(set(self.requirements) - self.supportedformats)
2354 self._applyrequirements(requirements)
2355 self._writerequirements()
2356
2357 if rbranchmap:
2358 rbheads = []
2359 for bheads in rbranchmap.itervalues():
2360 rbheads.extend(bheads)
2361
2362 if rbheads:
2363 rtiprev = max((int(self.changelog.rev(node))
2364 for node in rbheads))
2365 cache = branchmap.branchcache(rbranchmap,
2366 self[rtiprev].node(),
2367 rtiprev)
2368 # Try to stick it as low as possible
2369 # filters above served are unlikely to be fetched from a clone
2370 for candidate in ('base', 'immutable', 'served'):
2371 rview = self.filtered(candidate)
2372 if cache.validfor(rview):
2373 self._branchcaches[candidate] = cache
2374 cache.write(rview)
2375 break
2376 self.invalidate()
2377 return len(self.heads()) + 1
2378 finally:
2379 lock.release()
2380
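The wire format consumed by `stream_in` is line oriented: a status-code line (handled above and skipped here), a '<filecount> <bytecount>' line, then for each file a header of name, NUL byte, size, newline, followed by exactly `size` raw bytes. A minimal parser over an in-memory stream, assuming only the framing visible in the reads above:

    import io

    def parse_stream(fp):
        # '<filecount> <bytecount>\n', then per file
        # '<name>\0<size>\n' followed by exactly <size> raw bytes
        first = fp.readline().decode('ascii').split(' ', 1)
        total_files, total_bytes = int(first[0]), int(first[1])
        files = {}
        for _i in range(total_files):
            name, size = fp.readline().rstrip(b'\n').split(b'\0', 1)
            files[name] = fp.read(int(size.decode('ascii')))
        assert sum(len(v) for v in files.values()) == total_bytes
        return files

    raw = io.BytesIO(b'1 5\nfoo\x005\nhello')
    assert parse_stream(raw) == {b'foo': b'hello'}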
2381 def clone(self, remote, heads=[], stream=False):
2382 '''clone remote repository.
2383
2384 keyword arguments:
2385 heads: list of revs to clone (forces use of pull)
2386 stream: use streaming clone if possible'''
2387
2388 # now, all clients that can request uncompressed clones can
2389 # read repo formats supported by all servers that can serve
2390 # them.
2391
2392 # if revlog format changes, client will have to check version
2393 # and format flags on "stream" capability, and use
2394 # uncompressed only if compatible.
2395
2396 if not stream:
2397 # if the server explicitly prefers to stream (for fast LANs)
2398 stream = remote.capable('stream-preferred')
2399
2400 if stream and not heads:
2401 # 'stream' means remote revlog format is revlogv1 only
2402 if remote.capable('stream'):
2403 return self.stream_in(remote, set(('revlogv1',)))
2404 # otherwise, 'streamreqs' contains the remote revlog format
2405 streamreqs = remote.capable('streamreqs')
2406 if streamreqs:
2407 streamreqs = set(streamreqs.split(','))
2408 # if we support it, stream in and adjust our requirements
2409 if not streamreqs - self.supportedformats:
2410 return self.stream_in(remote, streamreqs)
2411 return self.pull(remote, heads)
2412
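The streaming decision in `clone` reduces to a capability check: stream only when every format requirement advertised by the server is one this client supports. As a tiny predicate (illustrative names):

    def canstream(streamreqs, supportedformats):
        # ok when the server's format requirements are a subset of ours
        return not set(streamreqs) - set(supportedformats)

    assert canstream(['revlogv1'], ['revlogv1', 'generaldelta'])
    assert not canstream(['futurefmt'], ['revlogv1'])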
2413 def pushkey(self, namespace, key, old, new):
2414 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2415 old=old, new=new)
2416 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2417 ret = pushkey.push(self, namespace, key, old, new)
2418 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2419 ret=ret)
2420 return ret
2421
2422 def listkeys(self, namespace):
2423 self.hook('prelistkeys', throw=True, namespace=namespace)
2424 self.ui.debug('listing keys for "%s"\n' % namespace)
2425 values = pushkey.list(self, namespace)
2426 self.hook('listkeys', namespace=namespace, values=values)
2427 return values
2428
2429 def debugwireargs(self, one, two, three=None, four=None, five=None):
2430 '''used to test argument passing over the wire'''
2431 return "%s %s %s %s %s" % (one, two, three, four, five)
2432
2433 def savecommitmessage(self, text):
2434 fp = self.opener('last-message.txt', 'wb')
2435 try:
2436 fp.write(text)
2437 finally:
2438 fp.close()
2439 return self.pathto(fp.name[len(self.root) + 1:])
2440
2441 # used to avoid circular references so destructors work
2442 def aftertrans(files):
2443 renamefiles = [tuple(t) for t in files]
2444 def a():
2445 for vfs, src, dest in renamefiles:
2446 try:
2447 vfs.rename(src, dest)
2448 except OSError: # journal file does not yet exist
2449 pass
2450 return a
2451
2452 def undoname(fn):
2453 base, name = os.path.split(fn)
2454 assert name.startswith('journal')
2455 return os.path.join(base, name.replace('journal', 'undo', 1))
2456
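`undoname` simply swaps the `journal` prefix for `undo` while preserving the directory part. A self-contained check (the function is repeated here so the snippet runs on its own):

    import os

    def undoname(fn):
        base, name = os.path.split(fn)
        assert name.startswith('journal')
        return os.path.join(base, name.replace('journal', 'undo', 1))

    assert undoname(os.path.join('.hg', 'journal.dirstate')) == \
           os.path.join('.hg', 'undo.dirstate')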
2457 def instance(ui, path, create):
2458 return localrepository(ui, util.urllocalpath(path), create)
2459
2460 def islocal(path):
2461 return True
@@ -1,358 +1,358 b''
1 # match.py - filename matching
2 #
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 import re
9 import scmutil, util, fileset
9 import util, fileset, pathutil
10 from i18n import _
11
12 def _rematcher(pat):
13 m = util.compilere(pat)
14 try:
15 # slightly faster, provided by facebook's re2 bindings
16 return m.test_match
17 except AttributeError:
18 return m.match
19
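`_rematcher` duck-types the compiled object: re2-style bindings expose a `test_match` entry point, while the stdlib `re` module does not, so the `AttributeError` selects the right callable at lookup time. The same pattern against the stdlib alone, where the fallback branch always wins:

    import re

    def rematcher(pat):
        m = re.compile(pat)
        try:
            return m.test_match  # fast path if an re2-style binding is loaded
        except AttributeError:
            return m.match       # stdlib fallback

    match = rematcher(r'foo.*')
    assert match('foobar')
    assert not match('barfoo')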
20 def _expandsets(pats, ctx):
21 '''convert set: patterns into a list of files in the given context'''
22 fset = set()
23 other = []
24
25 for kind, expr in pats:
26 if kind == 'set':
27 if not ctx:
28 raise util.Abort("fileset expression with no context")
29 s = fileset.getfileset(ctx, expr)
30 fset.update(s)
31 continue
32 other.append((kind, expr))
33 return fset, other
34
35 class match(object):
36 def __init__(self, root, cwd, patterns, include=[], exclude=[],
37 default='glob', exact=False, auditor=None, ctx=None):
38 """build an object to match a set of file patterns
39
40 arguments:
41 root - the canonical root of the tree you're matching against
42 cwd - the current working directory, if relevant
43 patterns - patterns to find
44 include - patterns to include
45 exclude - patterns to exclude
46 default - if a pattern in names has no explicit type, assume this one
47 exact - patterns are actually literals
48
49 a pattern is one of:
50 'glob:<glob>' - a glob relative to cwd
51 're:<regexp>' - a regular expression
52 'path:<path>' - a path relative to repository root
53 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
54 'relpath:<path>' - a path relative to cwd
55 'relre:<regexp>' - a regexp that needn't match the start of a name
56 'set:<fileset>' - a fileset expression
57 '<something>' - a pattern of the specified default type
58 """
59
60 self._root = root
61 self._cwd = cwd
62 self._files = []
63 self._anypats = bool(include or exclude)
64 self._ctx = ctx
65 self._always = False
66
67 if include:
68 pats = _normalize(include, 'glob', root, cwd, auditor)
69 self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
70 if exclude:
71 pats = _normalize(exclude, 'glob', root, cwd, auditor)
72 self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
73 if exact:
74 if isinstance(patterns, list):
75 self._files = patterns
76 else:
77 self._files = list(patterns)
78 pm = self.exact
79 elif patterns:
80 pats = _normalize(patterns, default, root, cwd, auditor)
81 self._files = _roots(pats)
82 self._anypats = self._anypats or _anypats(pats)
83 self.patternspat, pm = _buildmatch(ctx, pats, '$')
84
85 if patterns or exact:
86 if include:
87 if exclude:
88 m = lambda f: im(f) and not em(f) and pm(f)
89 else:
90 m = lambda f: im(f) and pm(f)
91 else:
92 if exclude:
93 m = lambda f: not em(f) and pm(f)
94 else:
95 m = pm
96 else:
97 if include:
98 if exclude:
99 m = lambda f: im(f) and not em(f)
100 else:
101 m = im
102 else:
103 if exclude:
104 m = lambda f: not em(f)
105 else:
106 m = lambda f: True
107 self._always = True
108
109 self.matchfn = m
110 self._fmap = set(self._files)
111
112 def __call__(self, fn):
112 def __call__(self, fn):
113 return self.matchfn(fn)
113 return self.matchfn(fn)
114 def __iter__(self):
114 def __iter__(self):
115 for f in self._files:
115 for f in self._files:
116 yield f
116 yield f
117 def bad(self, f, msg):
117 def bad(self, f, msg):
118 '''callback for each explicit file that can't be
118 '''callback for each explicit file that can't be
119 found/accessed, with an error message
119 found/accessed, with an error message
120 '''
120 '''
121 pass
121 pass
122 # If this is set, it will be called when an explicitly listed directory is
122 # If this is set, it will be called when an explicitly listed directory is
123 # visited.
123 # visited.
124 explicitdir = None
124 explicitdir = None
125 # If this is set, it will be called when a directory discovered by recursive
125 # If this is set, it will be called when a directory discovered by recursive
126 # traversal is visited.
126 # traversal is visited.
127 traversedir = None
127 traversedir = None
128 def missing(self, f):
128 def missing(self, f):
129 pass
129 pass
130 def exact(self, f):
130 def exact(self, f):
131 return f in self._fmap
131 return f in self._fmap
132 def rel(self, f):
132 def rel(self, f):
133 return util.pathto(self._root, self._cwd, f)
133 return util.pathto(self._root, self._cwd, f)
134 def files(self):
134 def files(self):
135 return self._files
135 return self._files
136 def anypats(self):
136 def anypats(self):
137 return self._anypats
137 return self._anypats
138 def always(self):
138 def always(self):
139 return self._always
139 return self._always
140
140
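The nested if/else chain above only exists to pick the cheapest composition of the three compiled predicates. A minimal, self-contained sketch of the same combination logic (the lambdas below are stand-ins for the include/exclude/pattern matchers built by _buildmatch, not mercurial's real ones):

im = lambda f: f.startswith('src/')   # include: the src/ subtree
em = lambda f: f.endswith('.o')       # exclude: object files
pm = lambda f: f.endswith('.c')       # patterns: C sources
m = lambda f: im(f) and not em(f) and pm(f)
print(m('src/main.c'))   # True: included, not excluded, matches the pattern
print(m('src/util.o'))   # False: excluded (and pm fails anyway)
print(m('doc/main.c'))   # False: outside the include
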
141 class exact(match):
142 def __init__(self, root, cwd, files):
143 match.__init__(self, root, cwd, files, exact=True)
144
145 class always(match):
146 def __init__(self, root, cwd):
147 match.__init__(self, root, cwd, [])
148 self._always = True
149
150 class narrowmatcher(match):
151 """Adapt a matcher to work on a subdirectory only.
152
153 The paths are remapped to remove/insert the path as needed:
154
155 >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
156 >>> m2 = narrowmatcher('sub', m1)
157 >>> bool(m2('a.txt'))
158 False
159 >>> bool(m2('b.txt'))
160 True
161 >>> bool(m2.matchfn('a.txt'))
162 False
163 >>> bool(m2.matchfn('b.txt'))
164 True
165 >>> m2.files()
166 ['b.txt']
167 >>> m2.exact('b.txt')
168 True
169 >>> m2.rel('b.txt')
170 'b.txt'
171 >>> def bad(f, msg):
172 ... print "%s: %s" % (f, msg)
173 >>> m1.bad = bad
174 >>> m2.bad('x.txt', 'No such file')
175 sub/x.txt: No such file
176 """
177
178 def __init__(self, path, matcher):
179 self._root = matcher._root
180 self._cwd = matcher._cwd
181 self._path = path
182 self._matcher = matcher
183 self._always = matcher._always
184
185 self._files = [f[len(path) + 1:] for f in matcher._files
186 if f.startswith(path + "/")]
187 self._anypats = matcher._anypats
188 self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
189 self._fmap = set(self._files)
190
191 def bad(self, f, msg):
192 self._matcher.bad(self._path + "/" + f, msg)
193
194 def patkind(pat):
195 return _patsplit(pat, None)[0]
196
197 def _patsplit(pat, default):
198 """Split a string into an optional pattern kind prefix and the
199 actual pattern."""
200 if ':' in pat:
201 kind, val = pat.split(':', 1)
202 if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
203 'listfile', 'listfile0', 'set'):
204 return kind, val
205 return default, pat
206
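The kind whitelist matters: an unknown prefix is not an error, it simply falls back to the default kind. A self-contained re-statement of the rule (same logic as _patsplit above, inlined so it runs without mercurial on the path):

def patsplit(pat, default):
    if ':' in pat:
        kind, val = pat.split(':', 1)
        if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
                    'listfile', 'listfile0', 'set'):
            return kind, val
    return default, pat

print(patsplit('glob:*.c', 'relpath'))   # ('glob', '*.c')
print(patsplit('foo.c', 'relpath'))      # ('relpath', 'foo.c')
print(patsplit('odd:thing', 'relpath'))  # ('relpath', 'odd:thing') - unknown kind
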
207 def _globre(pat):
208 "convert a glob pattern into a regexp"
209 i, n = 0, len(pat)
210 res = ''
211 group = 0
212 escape = re.escape
213 def peek():
214 return i < n and pat[i]
215 while i < n:
216 c = pat[i]
217 i += 1
218 if c not in '*?[{},\\':
219 res += escape(c)
220 elif c == '*':
221 if peek() == '*':
222 i += 1
223 res += '.*'
224 else:
225 res += '[^/]*'
226 elif c == '?':
227 res += '.'
228 elif c == '[':
229 j = i
230 if j < n and pat[j] in '!]':
231 j += 1
232 while j < n and pat[j] != ']':
233 j += 1
234 if j >= n:
235 res += '\\['
236 else:
237 stuff = pat[i:j].replace('\\','\\\\')
238 i = j + 1
239 if stuff[0] == '!':
240 stuff = '^' + stuff[1:]
241 elif stuff[0] == '^':
242 stuff = '\\' + stuff
243 res = '%s[%s]' % (res, stuff)
244 elif c == '{':
245 group += 1
246 res += '(?:'
247 elif c == '}' and group:
248 res += ')'
249 group -= 1
250 elif c == ',' and group:
251 res += '|'
252 elif c == '\\':
253 p = peek()
254 if p:
255 i += 1
256 res += escape(p)
257 else:
258 res += escape(c)
259 else:
260 res += escape(c)
261 return res
262
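A few translations this routine produces, derived by tracing the code above; the checks use the stdlib re module directly, so they run standalone:

import re
# 'a?c'    -> 'a.c'          ('?' becomes '.')
# '*.py'   -> '[^/]*\\.py'   ('*' never crosses a slash)
# '**.py'  -> '.*\\.py'      ('**' does)
# '{a,b}c' -> '(?:a|b)c'     (brace groups become alternations)
print(re.match('[^/]*\\.py', 'setup.py') is not None)   # True
print(re.match('[^/]*\\.py', 'src/x.py') is not None)   # False: stopped by '/'
print(re.match('.*\\.py', 'src/x.py') is not None)      # True: '**' semantics
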
263 def _regex(kind, name, tail):
264 '''convert a pattern into a regular expression'''
265 if not name:
266 return ''
267 if kind == 're':
268 return name
269 elif kind == 'path':
270 return '^' + re.escape(name) + '(?:/|$)'
271 elif kind == 'relglob':
272 return '(?:|.*/)' + _globre(name) + tail
273 elif kind == 'relpath':
274 return re.escape(name) + '(?:/|$)'
275 elif kind == 'relre':
276 if name.startswith('^'):
277 return name
278 return '.*' + name
279 return _globre(name) + tail
280
281 def _buildmatch(ctx, pats, tail):
282 fset, pats = _expandsets(pats, ctx)
283 if not pats:
284 return "", fset.__contains__
285
286 pat, mf = _buildregexmatch(pats, tail)
287 if fset:
288 return pat, lambda f: f in fset or mf(f)
289 return pat, mf
290
291 def _buildregexmatch(pats, tail):
292 """build a matching function from a set of patterns"""
293 try:
294 pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
295 if len(pat) > 20000:
296 raise OverflowError
297 return pat, _rematcher(pat)
298 except OverflowError:
299 # We're using a Python with a tiny regex engine and we
300 # made it explode, so we'll divide the pattern list in two
301 # until it works
302 l = len(pats)
303 if l < 2:
304 raise
305 pata, a = _buildregexmatch(pats[:l//2], tail)
306 patb, b = _buildregexmatch(pats[l//2:], tail)
307 return pat, lambda s: a(s) or b(s)
308 except re.error:
309 for k, p in pats:
310 try:
311 _rematcher('(?:%s)' % _regex(k, p, tail))
312 except re.error:
313 raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
314 raise util.Abort(_("invalid pattern"))
315
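The OverflowError branch is a divide-and-conquer fallback: when one giant alternation is too big for the regex engine, the pattern list is split in half and the two resulting matchers are ORed together. A self-contained illustration of the same shape (the size cutoff here is invented for the demo):

def build(preds):
    if len(preds) <= 2:      # pretend anything larger would 'overflow'
        return lambda s: any(p(s) for p in preds)
    half = len(preds) // 2
    a, b = build(preds[:half]), build(preds[half:])
    return lambda s: a(s) or b(s)

m = build([str.isdigit, str.isalpha, str.isspace])
print(m('42'))   # True
print(m('hg'))   # True
print(m('%'))    # False
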
316 def _normalize(names, default, root, cwd, auditor):
317 pats = []
318 for kind, name in [_patsplit(p, default) for p in names]:
319 if kind in ('glob', 'relpath'):
320 name = scmutil.canonpath(root, cwd, name, auditor)
320 name = pathutil.canonpath(root, cwd, name, auditor)
321 elif kind in ('relglob', 'path'):
322 name = util.normpath(name)
323 elif kind in ('listfile', 'listfile0'):
324 try:
325 files = util.readfile(name)
326 if kind == 'listfile0':
327 files = files.split('\0')
328 else:
329 files = files.splitlines()
330 files = [f for f in files if f]
331 except EnvironmentError:
332 raise util.Abort(_("unable to read file list (%s)") % name)
333 pats += _normalize(files, default, root, cwd, auditor)
334 continue
335
336 pats.append((kind, name))
337 return pats
338
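The listfile kinds pull further patterns out of a file and feed them back through _normalize recursively. A stdlib sketch of just the read-and-split step (the file and its contents are hypothetical):

import os, tempfile
fd, name = tempfile.mkstemp()
os.write(fd, b'glob:*.py\nsrc/main.c\n\n')
os.close(fd)
files = [f for f in open(name).read().splitlines() if f]
print(files)   # ['glob:*.py', 'src/main.c'] - each re-enters _normalize
os.unlink(name)
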
339 def _roots(patterns):
340 r = []
341 for kind, name in patterns:
342 if kind == 'glob': # find the non-glob prefix
343 root = []
344 for p in name.split('/'):
345 if '[' in p or '{' in p or '*' in p or '?' in p:
346 break
347 root.append(p)
348 r.append('/'.join(root) or '.')
349 elif kind in ('relpath', 'path'):
350 r.append(name or '.')
351 else: # relglob, re, relre
352 r.append('.')
353 return r
354
355 def _anypats(patterns):
356 for kind, name in patterns:
357 if kind in ('glob', 're', 'relglob', 'relre', 'set'):
358 return True
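The non-glob prefix extraction in _roots is what lets the walker restrict itself to a subtree; it is small enough to run standalone:

def globprefix(name):
    root = []
    for p in name.split('/'):
        if '[' in p or '{' in p or '*' in p or '?' in p:
            break
        root.append(p)
    return '/'.join(root) or '.'

print(globprefix('src/*.c'))       # 'src' - only this subtree is walked
print(globprefix('src/[ab]/x.c'))  # 'src'
print(globprefix('*.c'))           # '.'   - no usable prefix
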
@@ -1,1025 +1,886 b''
1 # scmutil.py - Mercurial core utility functions
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from i18n import _
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases, parsers
11 import pathutil
11 import match as matchmod
12 import os, errno, re, stat, glob
13 import os, errno, re, glob
13
14 if os.name == 'nt':
15 import scmwindows as scmplatform
16 else:
17 import scmposix as scmplatform
18
19 systemrcpath = scmplatform.systemrcpath
20 userrcpath = scmplatform.userrcpath
21
22 def nochangesfound(ui, repo, excluded=None):
23 '''Report no changes for push/pull, excluded is None or a list of
24 nodes excluded from the push/pull.
25 '''
26 secretlist = []
27 if excluded:
28 for n in excluded:
29 if n not in repo:
30 # discovery should not have included the filtered revision,
31 # we have to explicitly exclude it until discovery is cleanup.
32 continue
33 ctx = repo[n]
34 if ctx.phase() >= phases.secret and not ctx.extinct():
35 secretlist.append(n)
36
37 if secretlist:
38 ui.status(_("no changes found (ignored %d secret changesets)\n")
39 % len(secretlist))
40 else:
41 ui.status(_("no changes found\n"))
42
43 def checknewlabel(repo, lbl, kind):
44 # Do not use the "kind" parameter in ui output.
45 # It makes strings difficult to translate.
46 if lbl in ['tip', '.', 'null']:
47 raise util.Abort(_("the name '%s' is reserved") % lbl)
48 for c in (':', '\0', '\n', '\r'):
49 if c in lbl:
50 raise util.Abort(_("%r cannot be used in a name") % c)
51 try:
52 int(lbl)
53 raise util.Abort(_("cannot use an integer as a name"))
54 except ValueError:
55 pass
56
57 def checkfilename(f):
58 '''Check that the filename f is an acceptable filename for a tracked file'''
59 if '\r' in f or '\n' in f:
60 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
61
62 def checkportable(ui, f):
63 '''Check if filename f is portable and warn or abort depending on config'''
64 checkfilename(f)
65 abort, warn = checkportabilityalert(ui)
66 if abort or warn:
67 msg = util.checkwinfilename(f)
68 if msg:
69 msg = "%s: %r" % (msg, f)
70 if abort:
71 raise util.Abort(msg)
72 ui.warn(_("warning: %s\n") % msg)
73
74 def checkportabilityalert(ui):
75 '''check if the user's config requests nothing, a warning, or abort for
76 non-portable filenames'''
77 val = ui.config('ui', 'portablefilenames', 'warn')
78 lval = val.lower()
79 bval = util.parsebool(val)
80 abort = os.name == 'nt' or lval == 'abort'
81 warn = bval or lval == 'warn'
82 if bval is None and not (warn or abort or lval == 'ignore'):
83 raise error.ConfigError(
84 _("ui.portablefilenames value is invalid ('%s')") % val)
85 return abort, warn
86
87 class casecollisionauditor(object):
88 def __init__(self, ui, abort, dirstate):
89 self._ui = ui
90 self._abort = abort
91 allfiles = '\0'.join(dirstate._map)
92 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
93 self._dirstate = dirstate
94 # The purpose of _newfiles is so that we don't complain about
95 # case collisions if someone were to call this object with the
96 # same filename twice.
97 self._newfiles = set()
98
99 def __call__(self, f):
100 if f in self._newfiles:
101 return
102 fl = encoding.lower(f)
103 if fl in self._loweredfiles and f not in self._dirstate:
104 msg = _('possible case-folding collision for %s') % f
105 if self._abort:
106 raise util.Abort(msg)
107 self._ui.warn(_("warning: %s\n") % msg)
108 self._loweredfiles.add(fl)
109 self._newfiles.add(f)
110
111 class pathauditor(object):
112 '''ensure that a filesystem path contains no banned components.
113 the following properties of a path are checked:
114
115 - ends with a directory separator
116 - under top-level .hg
117 - starts at the root of a windows drive
118 - contains ".."
119 - traverses a symlink (e.g. a/symlink_here/b)
120 - inside a nested repository (a callback can be used to approve
121 some nested repositories, e.g., subrepositories)
122 '''
123
124 def __init__(self, root, callback=None):
125 self.audited = set()
126 self.auditeddir = set()
127 self.root = root
128 self.callback = callback
129 if os.path.lexists(root) and not util.checkcase(root):
130 self.normcase = util.normcase
131 else:
132 self.normcase = lambda x: x
133
134 def __call__(self, path):
135 '''Check the relative path.
136 path may contain a pattern (e.g. foodir/**.txt)'''
137
138 path = util.localpath(path)
139 normpath = self.normcase(path)
140 if normpath in self.audited:
141 return
142 # AIX ignores "/" at end of path, others raise EISDIR.
143 if util.endswithsep(path):
144 raise util.Abort(_("path ends in directory separator: %s") % path)
145 parts = util.splitpath(path)
146 if (os.path.splitdrive(path)[0]
147 or parts[0].lower() in ('.hg', '.hg.', '')
148 or os.pardir in parts):
149 raise util.Abort(_("path contains illegal component: %s") % path)
150 if '.hg' in path.lower():
151 lparts = [p.lower() for p in parts]
152 for p in '.hg', '.hg.':
153 if p in lparts[1:]:
154 pos = lparts.index(p)
155 base = os.path.join(*parts[:pos])
156 raise util.Abort(_("path '%s' is inside nested repo %r")
157 % (path, base))
158
159 normparts = util.splitpath(normpath)
160 assert len(parts) == len(normparts)
161
162 parts.pop()
163 normparts.pop()
164 prefixes = []
165 while parts:
166 prefix = os.sep.join(parts)
167 normprefix = os.sep.join(normparts)
168 if normprefix in self.auditeddir:
169 break
170 curpath = os.path.join(self.root, prefix)
171 try:
172 st = os.lstat(curpath)
173 except OSError, err:
174 # EINVAL can be raised as invalid path syntax under win32.
175 # They must be ignored for patterns can be checked too.
176 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
177 raise
178 else:
179 if stat.S_ISLNK(st.st_mode):
180 raise util.Abort(
181 _('path %r traverses symbolic link %r')
182 % (path, prefix))
183 elif (stat.S_ISDIR(st.st_mode) and
184 os.path.isdir(os.path.join(curpath, '.hg'))):
185 if not self.callback or not self.callback(curpath):
186 raise util.Abort(_("path '%s' is inside nested "
187 "repo %r")
188 % (path, prefix))
189 prefixes.append(normprefix)
190 parts.pop()
191 normparts.pop()
192
193 self.audited.add(normpath)
194 # only add prefixes to the cache after checking everything: we don't
195 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
196 self.auditeddir.update(prefixes)
197
198 def check(self, path):
199 try:
200 self(path)
201 return True
202 except (OSError, util.Abort):
203 return False
204
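This is the class being moved into the new pathutil module. A simplified, posix-only sketch of its cheap component checks (the symlink and nested-repo checks above need a real filesystem and are omitted here):

import os
def banned(path):
    # simplified: mirrors the splitdrive / '.hg' / os.pardir checks above
    parts = path.split('/')
    return (os.path.splitdrive(path)[0] != ''
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts)

print(banned('src/x.c'))        # False
print(banned('../etc/passwd'))  # True: contains '..'
print(banned('.hg/hgrc'))       # True: under top-level .hg
print(banned('/abs/path'))      # True: parts[0] is '' for a rooted path
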
205 class abstractvfs(object):
206 """Abstract base class; cannot be instantiated"""
207
208 def __init__(self, *args, **kwargs):
209 '''Prevent instantiation; don't call this from subclasses.'''
210 raise NotImplementedError('attempted instantiating ' + str(type(self)))
211
212 def tryread(self, path):
213 '''gracefully return an empty string for missing files'''
214 try:
215 return self.read(path)
216 except IOError, inst:
217 if inst.errno != errno.ENOENT:
218 raise
219 return ""
220
221 def open(self, path, mode="r", text=False, atomictemp=False):
222 self.open = self.__call__
223 return self.__call__(path, mode, text, atomictemp)
224
225 def read(self, path):
226 fp = self(path, 'rb')
227 try:
228 return fp.read()
229 finally:
230 fp.close()
231
232 def write(self, path, data):
233 fp = self(path, 'wb')
234 try:
235 return fp.write(data)
236 finally:
237 fp.close()
238
239 def append(self, path, data):
240 fp = self(path, 'ab')
241 try:
242 return fp.write(data)
243 finally:
244 fp.close()
245
246 def exists(self, path=None):
247 return os.path.exists(self.join(path))
248
249 def fstat(self, fp):
250 return util.fstat(fp)
251
252 def isdir(self, path=None):
253 return os.path.isdir(self.join(path))
254
255 def islink(self, path=None):
256 return os.path.islink(self.join(path))
257
258 def lstat(self, path=None):
259 return os.lstat(self.join(path))
260
261 def makedir(self, path=None, notindexed=True):
262 return util.makedir(self.join(path), notindexed)
263
264 def makedirs(self, path=None, mode=None):
265 return util.makedirs(self.join(path), mode)
266
267 def mkdir(self, path=None):
268 return os.mkdir(self.join(path))
269
270 def readdir(self, path=None, stat=None, skip=None):
271 return osutil.listdir(self.join(path), stat, skip)
272
273 def rename(self, src, dst):
274 return util.rename(self.join(src), self.join(dst))
275
276 def readlink(self, path):
277 return os.readlink(self.join(path))
278
279 def setflags(self, path, l, x):
280 return util.setflags(self.join(path), l, x)
281
282 def stat(self, path=None):
283 return os.stat(self.join(path))
284
285 def unlink(self, path=None):
286 return util.unlink(self.join(path))
287
288 def utime(self, path=None, t=None):
289 return os.utime(self.join(path), t)
290
291 class vfs(abstractvfs):
292 '''Operate files relative to a base directory
293
294 This class is used to hide the details of COW semantics and
295 remote file access from higher level code.
296 '''
297 def __init__(self, base, audit=True, expandpath=False, realpath=False):
298 if expandpath:
299 base = util.expandpath(base)
300 if realpath:
301 base = os.path.realpath(base)
302 self.base = base
303 self._setmustaudit(audit)
304 self.createmode = None
305 self._trustnlink = None
306
307 def _getmustaudit(self):
308 return self._audit
309
310 def _setmustaudit(self, onoff):
311 self._audit = onoff
312 if onoff:
313 self.audit = pathauditor(self.base)
220 self.audit = pathutil.pathauditor(self.base)
314 else:
315 self.audit = util.always
316
317 mustaudit = property(_getmustaudit, _setmustaudit)
318
319 @util.propertycache
320 def _cansymlink(self):
321 return util.checklink(self.base)
322
323 @util.propertycache
324 def _chmod(self):
325 return util.checkexec(self.base)
326
327 def _fixfilemode(self, name):
328 if self.createmode is None or not self._chmod:
329 return
330 os.chmod(name, self.createmode & 0666)
331
332 def __call__(self, path, mode="r", text=False, atomictemp=False):
333 if self._audit:
334 r = util.checkosfilename(path)
335 if r:
336 raise util.Abort("%s: %r" % (r, path))
337 self.audit(path)
338 f = self.join(path)
339
340 if not text and "b" not in mode:
341 mode += "b" # for that other OS
342
343 nlink = -1
344 if mode not in ('r', 'rb'):
345 dirname, basename = util.split(f)
346 # If basename is empty, then the path is malformed because it points
347 # to a directory. Let the posixfile() call below raise IOError.
348 if basename:
349 if atomictemp:
350 util.ensuredirs(dirname, self.createmode)
351 return util.atomictempfile(f, mode, self.createmode)
352 try:
353 if 'w' in mode:
354 util.unlink(f)
355 nlink = 0
356 else:
357 # nlinks() may behave differently for files on Windows
358 # shares if the file is open.
359 fd = util.posixfile(f)
360 nlink = util.nlinks(f)
361 if nlink < 1:
362 nlink = 2 # force mktempcopy (issue1922)
363 fd.close()
364 except (OSError, IOError), e:
365 if e.errno != errno.ENOENT:
366 raise
367 nlink = 0
368 util.ensuredirs(dirname, self.createmode)
369 if nlink > 0:
370 if self._trustnlink is None:
371 self._trustnlink = nlink > 1 or util.checknlink(f)
372 if nlink > 1 or not self._trustnlink:
373 util.rename(util.mktempcopy(f), f)
374 fp = util.posixfile(f, mode)
375 if nlink == 0:
376 self._fixfilemode(f)
377 return fp
378
379 def symlink(self, src, dst):
380 self.audit(dst)
381 linkname = self.join(dst)
382 try:
383 os.unlink(linkname)
384 except OSError:
385 pass
386
387 util.ensuredirs(os.path.dirname(linkname), self.createmode)
388
389 if self._cansymlink:
390 try:
391 os.symlink(src, linkname)
392 except OSError, err:
393 raise OSError(err.errno, _('could not symlink to %r: %s') %
394 (src, err.strerror), linkname)
395 else:
396 self.write(dst, src)
397
398 def join(self, path):
399 if path:
400 return os.path.join(self.base, path)
401 else:
402 return self.base
403
404 opener = vfs
405
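Every concrete operation in abstractvfs funnels through join() and __call__; a toy stand-in showing that contract (this is not mercurial's class, just the shape of it):

import os
class tinyvfs(object):
    def __init__(self, base):
        self.base = base
    def join(self, path):
        # every operation resolves a relative path against base
        return os.path.join(self.base, path) if path else self.base
    def __call__(self, path, mode='r'):
        return open(self.join(path), mode)
    def read(self, path):
        fp = self(path, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

v = tinyvfs('/tmp')
print(v.join('a/b'))   # /tmp/a/b
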
406 class auditvfs(object):
407 def __init__(self, vfs):
408 self.vfs = vfs
409
410 def _getmustaudit(self):
411 return self.vfs.mustaudit
412
413 def _setmustaudit(self, onoff):
414 self.vfs.mustaudit = onoff
415
416 mustaudit = property(_getmustaudit, _setmustaudit)
417
418 class filtervfs(abstractvfs, auditvfs):
419 '''Wrapper vfs for filtering filenames with a function.'''
420
421 def __init__(self, vfs, filter):
422 auditvfs.__init__(self, vfs)
423 self._filter = filter
424
425 def __call__(self, path, *args, **kwargs):
426 return self.vfs(self._filter(path), *args, **kwargs)
427
428 def join(self, path):
429 if path:
430 return self.vfs.join(self._filter(path))
431 else:
432 return self.vfs.join(path)
433
434 filteropener = filtervfs
435
436 class readonlyvfs(abstractvfs, auditvfs):
437 '''Wrapper vfs preventing any writing.'''
438
439 def __init__(self, vfs):
440 auditvfs.__init__(self, vfs)
441
442 def __call__(self, path, mode='r', *args, **kw):
443 if mode not in ('r', 'rb'):
444 raise util.Abort('this vfs is read only')
445 return self.vfs(path, mode, *args, **kw)
446
447
448 def canonpath(root, cwd, myname, auditor=None):
449 '''return the canonical path of myname, given cwd and root'''
450 if util.endswithsep(root):
451 rootsep = root
452 else:
453 rootsep = root + os.sep
454 name = myname
455 if not os.path.isabs(name):
456 name = os.path.join(root, cwd, name)
457 name = os.path.normpath(name)
458 if auditor is None:
459 auditor = pathauditor(root)
460 if name != rootsep and name.startswith(rootsep):
461 name = name[len(rootsep):]
462 auditor(name)
463 return util.pconvert(name)
464 elif name == root:
465 return ''
466 else:
467 # Determine whether `name' is in the hierarchy at or beneath `root',
468 # by iterating name=dirname(name) until that causes no change (can't
469 # check name == '/', because that doesn't work on windows). The list
470 # `rel' holds the reversed list of components making up the relative
471 # file name we want.
472 rel = []
473 while True:
474 try:
475 s = util.samefile(name, root)
476 except OSError:
477 s = False
478 if s:
479 if not rel:
480 # name was actually the same as root (maybe a symlink)
481 return ''
482 rel.reverse()
483 name = os.path.join(*rel)
484 auditor(name)
485 return util.pconvert(name)
486 dirname, basename = util.split(name)
487 rel.append(basename)
488 if dirname == name:
489 break
490 name = dirname
491
492 raise util.Abort(_("%s not under root '%s'") % (myname, root))
493
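canonpath (also moving to pathutil in this change) first tries a cheap prefix strip and only then falls back to the samefile walk above. A sketch of the fast path alone, with posix paths assumed and no auditing:

import os
def canon_prefix(root, cwd, name):
    # fast path only: absolute-ize, normalize, strip the root prefix
    rootsep = root if root.endswith(os.sep) else root + os.sep
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if name.startswith(rootsep):
        return name[len(rootsep):]
    raise ValueError('%s not under root %s' % (name, root))

print(canon_prefix('/repo', 'src', 'x.c'))   # 'src/x.c'
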
494 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
495 '''yield every hg repository under path, always recursively.
496 The recurse flag will only control recursion into repo working dirs'''
497 def errhandler(err):
498 if err.filename == path:
499 raise err
500 samestat = getattr(os.path, 'samestat', None)
501 if followsym and samestat is not None:
502 def adddir(dirlst, dirname):
503 match = False
504 dirstat = os.stat(dirname)
505 for lstdirstat in dirlst:
506 if samestat(dirstat, lstdirstat):
507 match = True
508 break
509 if not match:
510 dirlst.append(dirstat)
511 return not match
512 else:
513 followsym = False
514
515 if (seen_dirs is None) and followsym:
516 seen_dirs = []
517 adddir(seen_dirs, path)
518 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
519 dirs.sort()
520 if '.hg' in dirs:
521 yield root # found a repository
522 qroot = os.path.join(root, '.hg', 'patches')
523 if os.path.isdir(os.path.join(qroot, '.hg')):
524 yield qroot # we have a patch queue repo here
525 if recurse:
526 # avoid recursing inside the .hg directory
527 dirs.remove('.hg')
528 else:
529 dirs[:] = [] # don't descend further
530 elif followsym:
531 newdirs = []
532 for d in dirs:
533 fname = os.path.join(root, d)
534 if adddir(seen_dirs, fname):
535 if os.path.islink(fname):
536 for hgname in walkrepos(fname, True, seen_dirs):
537 yield hgname
538 else:
539 newdirs.append(d)
540 dirs[:] = newdirs
541
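Stripped of the symlink bookkeeping, the core of walkrepos is an os.walk that stops descending once it finds a .hg directory. A runnable reduction:

import os
def findrepos(path):
    for root, dirs, files in os.walk(path):
        if '.hg' in dirs:
            yield root
            dirs[:] = []   # found a repo: don't descend further

for r in findrepos('.'):
    print(r)
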
542 def osrcpath():
543 '''return default os-specific hgrc search path'''
544 path = systemrcpath()
545 path.extend(userrcpath())
546 path = [os.path.normpath(f) for f in path]
547 return path
548
549 _rcpath = None
550
551 def rcpath():
552 '''return hgrc search path. if env var HGRCPATH is set, use it.
553 for each item in path, if directory, use files ending in .rc,
554 else use item.
555 make HGRCPATH empty to only look in .hg/hgrc of current repo.
556 if no HGRCPATH, use default os-specific path.'''
557 global _rcpath
558 if _rcpath is None:
559 if 'HGRCPATH' in os.environ:
560 _rcpath = []
561 for p in os.environ['HGRCPATH'].split(os.pathsep):
562 if not p:
563 continue
564 p = util.expandpath(p)
565 if os.path.isdir(p):
566 for f, kind in osutil.listdir(p):
567 if f.endswith('.rc'):
568 _rcpath.append(os.path.join(p, f))
569 else:
570 _rcpath.append(p)
571 else:
572 _rcpath = osrcpath()
573 return _rcpath
574
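How an HGRCPATH value decomposes under the rules documented above (the value here is hypothetical):

import os
val = os.pathsep.join(['', '/etc/mercurial/hgrc.d', '~/.hgrc'])
kept = [p for p in val.split(os.pathsep) if p]
print(kept)   # empty entries are skipped; directories contribute their *.rc files
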
575 def revsingle(repo, revspec, default='.'):
576 if not revspec and revspec != 0:
577 return repo[default]
578
579 l = revrange(repo, [revspec])
580 if len(l) < 1:
581 raise util.Abort(_('empty revision set'))
582 return repo[l[-1]]
583
584 def revpair(repo, revs):
585 if not revs:
586 return repo.dirstate.p1(), None
587
588 l = revrange(repo, revs)
589
590 if len(l) == 0:
591 if revs:
592 raise util.Abort(_('empty revision range'))
593 return repo.dirstate.p1(), None
594
595 if len(l) == 1 and len(revs) == 1 and _revrangesep not in revs[0]:
596 return repo.lookup(l[0]), None
597
598 return repo.lookup(l[0]), repo.lookup(l[-1])
599
600 _revrangesep = ':'
601
602 def revrange(repo, revs):
603 """Yield revision as strings from a list of revision specifications."""
604
605 def revfix(repo, val, defval):
606 if not val and val != 0 and defval is not None:
607 return defval
608 return repo[val].rev()
609
610 seen, l = set(), []
611 for spec in revs:
612 if l and not seen:
613 seen = set(l)
614 # attempt to parse old-style ranges first to deal with
615 # things like old-tag which contain query metacharacters
616 try:
617 if isinstance(spec, int):
618 seen.add(spec)
619 l.append(spec)
620 continue
621
622 if _revrangesep in spec:
623 start, end = spec.split(_revrangesep, 1)
624 start = revfix(repo, start, 0)
625 end = revfix(repo, end, len(repo) - 1)
626 if end == nullrev and start <= 0:
627 start = nullrev
628 rangeiter = repo.changelog.revs(start, end)
629 if not seen and not l:
630 # by far the most common case: revs = ["-1:0"]
631 l = list(rangeiter)
632 # defer syncing seen until next iteration
633 continue
634 newrevs = set(rangeiter)
635 if seen:
636 newrevs.difference_update(seen)
637 seen.update(newrevs)
638 else:
639 seen = newrevs
640 l.extend(sorted(newrevs, reverse=start > end))
641 continue
642 elif spec and spec in repo: # single unquoted rev
643 rev = revfix(repo, spec, None)
644 if rev in seen:
645 continue
646 seen.add(rev)
647 l.append(rev)
648 continue
649 except error.RepoLookupError:
650 pass
651
652 # fall through to new-style queries if old-style fails
653 m = revset.match(repo.ui, spec)
654 dl = [r for r in m(repo, list(repo)) if r not in seen]
655 l.extend(dl)
656 seen.update(dl)
657
658 return l
659
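The defaulting in revfix is what makes open-ended ranges like ':7' and '3:' work: an empty side of the separator falls back to the range bound. A reduced illustration (plain integers stand in for repo lookups):

def revfix_lite(val, defval):
    # mirrors revfix above: an empty spec side takes the default bound
    if not val and val != 0 and defval is not None:
        return defval
    return int(val)

print(revfix_lite('', 0))    # 0  - the start of ':7'
print(revfix_lite('7', 0))   # 7
print(revfix_lite('', 99))   # 99 - the end of '3:' in a 100-rev repo
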
660 def expandpats(pats):
521 def expandpats(pats):
661 if not util.expandglobs:
522 if not util.expandglobs:
662 return list(pats)
523 return list(pats)
663 ret = []
524 ret = []
664 for p in pats:
525 for p in pats:
665 kind, name = matchmod._patsplit(p, None)
526 kind, name = matchmod._patsplit(p, None)
666 if kind is None:
527 if kind is None:
667 try:
528 try:
668 globbed = glob.glob(name)
529 globbed = glob.glob(name)
669 except re.error:
530 except re.error:
670 globbed = [name]
531 globbed = [name]
671 if globbed:
532 if globbed:
672 ret.extend(globbed)
533 ret.extend(globbed)
673 continue
534 continue
674 ret.append(p)
535 ret.append(p)
675 return ret
536 return ret
676
537
677 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
538 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
678 if pats == ("",):
539 if pats == ("",):
679 pats = []
540 pats = []
680 if not globbed and default == 'relpath':
541 if not globbed and default == 'relpath':
681 pats = expandpats(pats or [])
542 pats = expandpats(pats or [])
682
543
683 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
544 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
684 default)
545 default)
685 def badfn(f, msg):
546 def badfn(f, msg):
686 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
547 ctx._repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
687 m.bad = badfn
548 m.bad = badfn
688 return m, pats
549 return m, pats
689
550
690 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
551 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
691 return matchandpats(ctx, pats, opts, globbed, default)[0]
552 return matchandpats(ctx, pats, opts, globbed, default)[0]
692
553
693 def matchall(repo):
554 def matchall(repo):
694 return matchmod.always(repo.root, repo.getcwd())
555 return matchmod.always(repo.root, repo.getcwd())
695
556
696 def matchfiles(repo, files):
557 def matchfiles(repo, files):
697 return matchmod.exact(repo.root, repo.getcwd(), files)
558 return matchmod.exact(repo.root, repo.getcwd(), files)
698
559
699 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
560 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
700 if dry_run is None:
561 if dry_run is None:
701 dry_run = opts.get('dry_run')
562 dry_run = opts.get('dry_run')
702 if similarity is None:
563 if similarity is None:
703 similarity = float(opts.get('similarity') or 0)
564 similarity = float(opts.get('similarity') or 0)
704 # we'd use status here, except handling of symlinks and ignore is tricky
565 # we'd use status here, except handling of symlinks and ignore is tricky
705 m = match(repo[None], pats, opts)
566 m = match(repo[None], pats, opts)
706 rejected = []
567 rejected = []
707 m.bad = lambda x, y: rejected.append(x)
568 m.bad = lambda x, y: rejected.append(x)
708
569
709 added, unknown, deleted, removed = _interestingfiles(repo, m)
570 added, unknown, deleted, removed = _interestingfiles(repo, m)
710
571
711 unknownset = set(unknown)
572 unknownset = set(unknown)
712 toprint = unknownset.copy()
573 toprint = unknownset.copy()
713 toprint.update(deleted)
574 toprint.update(deleted)
714 for abs in sorted(toprint):
575 for abs in sorted(toprint):
715 if repo.ui.verbose or not m.exact(abs):
576 if repo.ui.verbose or not m.exact(abs):
716 rel = m.rel(abs)
577 rel = m.rel(abs)
717 if abs in unknownset:
578 if abs in unknownset:
718 status = _('adding %s\n') % ((pats and rel) or abs)
579 status = _('adding %s\n') % ((pats and rel) or abs)
719 else:
580 else:
720 status = _('removing %s\n') % ((pats and rel) or abs)
581 status = _('removing %s\n') % ((pats and rel) or abs)
721 repo.ui.status(status)
582 repo.ui.status(status)
722
583
723 renames = _findrenames(repo, m, added + unknown, removed + deleted,
584 renames = _findrenames(repo, m, added + unknown, removed + deleted,
724 similarity)
585 similarity)
725
586
726 if not dry_run:
587 if not dry_run:
727 _markchanges(repo, unknown, deleted, renames)
588 _markchanges(repo, unknown, deleted, renames)
728
589
729 for f in rejected:
590 for f in rejected:
730 if f in m.files():
591 if f in m.files():
731 return 1
592 return 1
732 return 0
593 return 0
733
594
734 def marktouched(repo, files, similarity=0.0):
595 def marktouched(repo, files, similarity=0.0):
735 '''Assert that files have somehow been operated upon. files are relative to
596 '''Assert that files have somehow been operated upon. files are relative to
736 the repo root.'''
597 the repo root.'''
737 m = matchfiles(repo, files)
598 m = matchfiles(repo, files)
738 rejected = []
599 rejected = []
739 m.bad = lambda x, y: rejected.append(x)
600 m.bad = lambda x, y: rejected.append(x)
740
601
741 added, unknown, deleted, removed = _interestingfiles(repo, m)
602 added, unknown, deleted, removed = _interestingfiles(repo, m)
742
603
743 if repo.ui.verbose:
604 if repo.ui.verbose:
744 unknownset = set(unknown)
605 unknownset = set(unknown)
745 toprint = unknownset.copy()
606 toprint = unknownset.copy()
746 toprint.update(deleted)
607 toprint.update(deleted)
747 for abs in sorted(toprint):
608 for abs in sorted(toprint):
748 if abs in unknownset:
609 if abs in unknownset:
749 status = _('adding %s\n') % abs
610 status = _('adding %s\n') % abs
750 else:
611 else:
751 status = _('removing %s\n') % abs
612 status = _('removing %s\n') % abs
752 repo.ui.status(status)
613 repo.ui.status(status)
753
614
754 renames = _findrenames(repo, m, added + unknown, removed + deleted,
615 renames = _findrenames(repo, m, added + unknown, removed + deleted,
755 similarity)
616 similarity)
756
617
757 _markchanges(repo, unknown, deleted, renames)
618 _markchanges(repo, unknown, deleted, renames)
758
619
759 for f in rejected:
620 for f in rejected:
760 if f in m.files():
621 if f in m.files():
761 return 1
622 return 1
762 return 0
623 return 0
763
624
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed = [], [], [], []
-    audit_path = pathauditor(repo.root)
+    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        # for finding renames
        elif dstate == 'r':
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed

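# Editor's illustration: the single-letter dirstate states tested above are
# '?' (untracked), 'r' (removed), 'a' (added), 'n' (normal), 'm' (merged).
# A hypothetical restatement of the bucketing rule, for clarity only:
def _examplebucket(dstate, ondisk, pathok):
    if dstate == '?' and pathok:
        return 'unknown'    # untracked, present on disk, path audit passed
    elif dstate != 'r' and not ondisk:
        return 'deleted'    # still tracked but missing from the filesystem
    elif dstate == 'r':
        return 'removed'
    elif dstate == 'a':
        return 'added'
    return None             # clean or modified files are not interesting here
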
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

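# Editor's example (hypothetical names): after old.txt disappears and
# new.txt appears with mostly identical content,
#
#     _findrenames(repo, m, ['new.txt'], ['old.txt'], 0.8)
#
# returns {'new.txt': 'old.txt'}; the mapping is new name -> old name,
# which _markchanges below replays as wctx.copy(old, new).
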
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("unknown repository format: requires features '%s' (upgrade "
              "Mercurial)") % "', '".join(missings))
    return requirements

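# Editor's illustration of a typical requires file (values hypothetical):
#
#     $ cat .hg/requires
#     dotencode
#     fncache
#     revlogv1
#     store
#
# readrequires(opener, supported) returns this set unchanged when every
# entry is in `supported'; one unknown but well-formed entry aborts with
# "unknown repository format", and a malformed line is reported as a
# corrupt requires file.
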
class filecacheentry(object):
    def __init__(self, path, stat=True):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecacheentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecacheentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecacheentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise

class filecache(object):
    '''A property-like decorator that tracks a file under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates
    the object when needed, updating the new stat info in _filecache.

    Mercurial either atomically renames or appends to files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fall back to
    recreating the object on every call (essentially the same behaviour as
    propertycache).'''
    def __init__(self, path):
        self.path = path

    def join(self, obj, fname):
        """Used to compute the runtime path of the cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            path = self.join(obj, self.path)

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(path)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            ce = filecacheentry(self.join(obj, self.path), False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)

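# Editor's sketch of the intended use, modelled on how localrepo subclasses
# and applies this decorator; the class and file contents here are
# hypothetical illustrations, not Mercurial API.
class _examplecached(object):
    def __init__(self, root):
        self._filecache = {}            # required by the descriptor
        self.root = root

    def join(self, fname):
        # filecache.join calls this to locate the tracked file
        return os.path.join(self.root, '.hg', fname)

    @filecache('bookmarks')
    def bookmarks(self):
        # recomputed only when .hg/bookmarks' stat information changes
        return open(self.join('bookmarks')).read()
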
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if util.safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs

if util.safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)
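
# Editor's worked example of the counting trick above:
#
#     d = dirs(['a/b/c', 'a/b/d'])
#
# addpath('a/b/c') stores {'a/b': 1, 'a': 1}; addpath('a/b/d') bumps
# 'a/b' to 2 and returns early, since an already-present ancestor implies
# all of its ancestors were counted before. delpath reverses this: the
# first removal decrements 'a/b', the second deletes 'a/b' and then 'a',
# leaving the multiset empty, so membership tests stay balanced.
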
@@ -1,1469 +1,1470 b''
# subrepo.py - sub-repository handling for Mercurial
#
# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import errno, os, re, shutil, posixpath, sys
import xml.dom.minidom
import stat, subprocess, tarfile
from i18n import _
-import config, scmutil, util, node, error, cmdutil, bookmarks, match as matchmod
+import config, util, node, error, cmdutil, bookmarks, match as matchmod
+import pathutil
hg = None
propertycache = util.propertycache

nullstate = ('', '', 'empty')

def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expandedpath = util.urllocalpath(util.expandpath(path))
    u = util.url(expandedpath)
    if not u.scheme:
        path = util.normpath(os.path.abspath(u.path))
    return path

def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    return util.sha1(_expandedabspath(remotepath)).hexdigest()[0:12]

def _calcfilehash(filename):
    data = ''
    if os.path.exists(filename):
        fd = open(filename, 'rb')
        data = fd.read()
        fd.close()
    return util.sha1(data).hexdigest()

class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kw):
        error.Abort.__init__(self, *args, **kw)
        self.subrepo = kw.get('subrepo')
        self.cause = kw.get('cause')

def annotatesubrepoerror(func):
    def decoratedmethod(self, *args, **kargs):
        try:
            res = func(self, *args, **kargs)
        except SubrepoAbort, ex:
            # This exception has already been handled
            raise ex
        except error.Abort, ex:
            subrepo = subrelpath(self)
            errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
        return res
    return decoratedmethod

def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    def read(f, sections=None, remap=None):
        if f in ctx:
            try:
                data = ctx[f].data()
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
                # handle missing subrepo spec files as removed
                ui.warn(_("warning: subrepo spec file %s not found\n") % f)
                return
            p.parse(f, data, sections, remap, read)
        else:
            raise util.Abort(_("subrepo spec file %s not found") % f)

    if '.hgsub' in ctx:
        read('.hgsub')

    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))

    rev = {}
    if '.hgsubstate' in ctx:
        try:
            for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
                l = l.lstrip()
                if not l:
                    continue
                try:
                    revision, path = l.split(" ", 1)
                except ValueError:
                    raise util.Abort(_("invalid subrepository revision "
                                       "specifier in .hgsubstate line %d")
                                     % (i + 1))
                rev[path] = revision
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise

    def remap(src):
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = repl.encode('string-escape')
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error, e:
                raise util.Abort(_("bad subrepository pattern in %s: %s")
                                 % (p.source('subpaths', pattern), e))
        return src

    state = {}
    for path, src in p[''].items():
        kind = 'hg'
        if src.startswith('['):
            if ']' not in src:
                raise util.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
            src = src.lstrip() # strip any extra whitespace after ']'

        if not util.url(src).isabs():
            parent = _abssource(ctx._repo, abort=False)
            if parent:
                parent = util.url(parent)
                parent.path = posixpath.join(parent.path or '', src)
                parent.path = posixpath.normpath(parent.path)
                joined = str(parent)
                # Remap the full joined path and use it if it changes,
                # else remap the original source.
                remapped = remap(joined)
                if remapped == joined:
                    src = remap(src)
                else:
                    src = remapped

        src = remap(src)
        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)

    return state

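# Editor's illustration of the inputs and output (hypothetical repo):
#
# .hgsub:
#     vendor/lib = http://example.com/lib
#     ui/theme = [git]git://example.com/theme.git
# .hgsubstate:
#     1f4c4d7a... vendor/lib
#     aa45a801... ui/theme
#
# state(ctx, ui) then maps each path to (source, revision, kind):
#     {'vendor/lib': ('http://example.com/lib', '1f4c4d7a...', 'hg'),
#      'ui/theme': ('git://example.com/theme.git', 'aa45a801...', 'git')}
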
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state)]
    repo.wwrite('.hgsubstate', ''.join(lines), '')

def submerge(repo, wctx, mctx, actx, overwrite):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context"""
    if mctx == actx: # backwards?
        actx = wctx.p1()
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}

    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))

    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug("  subrepo %s: %s %s\n" % (s, msg, r))

    for s, l in sorted(s1.iteritems()):
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld

        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?'
                      '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                debug(s, "both sides changed")
                option = repo.ui.promptchoice(
                    _(' subrepository %s diverged (local revision: %s, '
                      'remote revision: %s)\n'
                      '(M)erge, keep (l)ocal or keep (r)emote?'
                      '$$ &Merge $$ &Local $$ &Remote')
                    % (s, l[1][:12], r[1][:12]), 0)
                if option == 0:
                    wctx.sub(s).merge(r)
                    sm[s] = l
                    debug(s, "merge with", r)
                elif option == 1:
                    sm[s] = l
                    debug(s, "keep local subrepo revision", l)
                else:
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
                    debug(s, "get remote subrepo revision", r)
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()

    for s, r in sorted(s2.items()):
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0) == 0:
                debug(s, "prompt recreate", r)
                wctx.sub(s).get(r)
                sm[s] = r

    # record merged .hgsubstate
    writestate(repo, sm)
    return sm

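# Editor's condensed summary of the case analysis above, where l is the
# local state, r the remote state and a the ancestor state of a subrepo:
#
#     l == r or r == a   -> keep local (no change, or local is newer)
#     l == a             -> take remote (only the other side changed)
#     sources differ     -> prompt: local or remote source
#     both sides changed -> prompt: merge, keep local, or keep remote
#     r gone, l == a     -> remote removed it; remove locally
#     r and a both gone  -> local addition; keep it
#     r gone, l != a     -> prompt: keep changed version or delete
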
def _updateprompt(ui, sub, dirty, local, remote):
    if dirty:
        msg = (_(' subrepository sources for %s differ\n'
                 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
                 '$$ &Local $$ &Remote')
               % (subrelpath(sub), local, remote))
    else:
        msg = (_(' subrepository sources for %s differ (in checked out '
                 'version)\n'
                 'use (l)ocal source (%s) or (r)emote source (%s)?\n'
                 '$$ &Local $$ &Remote')
               % (subrelpath(sub), local, remote))
    return ui.promptchoice(msg, 0)

def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    parent = repo
    while util.safehasattr(parent, '_subparent'):
        parent = parent._subparent
    p = parent.root.rstrip(os.sep)
    return repo.root[len(p) + 1:]

def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    if util.safehasattr(sub, '_relpath'):
        return sub._relpath
    if not util.safehasattr(sub, '_repo'):
        return sub._path
    return reporelpath(sub._repo)

def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    if util.safehasattr(repo, '_subparent'):
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        if util.safehasattr(repo, '_subtoppath'):
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
        if repo.sharedpath != repo.path:
            # chop off the .hg component to get the default path form
            return os.path.dirname(repo.sharedpath)
    if abort:
        raise util.Abort(_("default path for subrepository not found"))

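# Editor's worked example (hypothetical layout): for a subrepo whose
# .hgsub source is '../shared' and whose parent repo resolves to
# 'http://example.com/outer', the recursion joins 'outer' with
# '../shared' and posixpath.normpath collapses it, so
#
#     _abssource(sub._repo) == 'http://example.com/shared'
#
# paths.default-push and paths.default are consulted only once the
# recursion reaches the top-level repository.
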
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

def subrepo(ctx, path):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    import hg as h
    hg = h

-    scmutil.pathauditor(ctx._repo.root)(path)
+    pathutil.pathauditor(ctx._repo.root)(path)
    state = ctx.substate[path]
    if state[2] not in types:
        raise util.Abort(_('unknown subrepo type %s') % state[2])
    return types[state[2]](ctx, path, state[:2])

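# Editor's sketch of what the audit call above rejects; pathauditor raises
# util.Abort before any subrepo class is instantiated (paths hypothetical):
#
#     audit = pathutil.pathauditor('/repo/root')
#     audit('vendor/lib')   # ok: stays inside the repository
#     audit('../escape')    # aborts: path contains illegal component
#     audit('.hg/hgrc')     # aborts: top-level .hg is off limits
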
# subrepo classes need to implement the following abstract class:

class abstractsubrepo(object):

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        return False

    def dirty(self, ignoreupdate=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate.
        """
        raise NotImplementedError

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
        return []

    def status(self, rev2, **opts):
        return [], [], [], [], [], [], []

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        pass

    def outgoing(self, ui, dest, opts):
        return 1

    def incoming(self, ui, source, opts):
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name):
        """return file data"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags"""
        return ''

    def archive(self, ui, archiver, prefix, match=None):
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0,
                    unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            mode = 'x' in flags and 0755 or 0644
            symlink = 'l' in flags
            archiver.addfile(os.path.join(prefix, self._path, name),
                             mode, symlink, self.filedata(name))
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'), total=total)
        ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''
        pass

    def forget(self, ui, match, prefix):
        return ([], [])

    def revert(self, ui, substate, *pats, **opts):
        ui.warn('%s: reverting %s subrepos is unsupported\n' \
            % (substate[0], substate[2]))
        return []

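# Editor's illustration: a minimal, hypothetical subrepo type showing the
# smallest surface abstractsubrepo.archive needs (files, filedata and the
# _path attribute); the real types below implement far more.
class _examplestaticsubrepo(abstractsubrepo):
    '''read-only subrepo over a plain directory tree (illustration only)'''
    def __init__(self, ctx, path, state):
        self._path = path                     # used by archive()
        self._root = os.path.join(ctx._repo.root, path)

    def files(self):
        found = []
        for dirpath, subdirs, names in os.walk(self._root):
            for n in names:
                full = os.path.join(dirpath, n)
                found.append(full[len(self._root) + 1:])
        return found

    def filedata(self, name):
        fp = open(os.path.join(self._root, name), 'rb')
        try:
            return fp.read()
        finally:
            fp.close()
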
class hgsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        r = ctx._repo
        root = r.wjoin(path)
        create = False
        if not os.path.exists(os.path.join(root, '.hg')):
            create = True
            util.makedirs(root)
        self._repo = hg.repository(r.baseui, root, create=create)
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self._repo.ui.setconfig(s, k, v)
        self._repo.ui.setconfig('ui', '_usedassubrepo', 'True')
        self._initrepo(r, state[0], create)

    def storeclean(self, path):
        clean = True
        lock = self._repo.lock()
        itercache = self._calcstorehash(path)
        try:
            for filehash in self._readstorehashcache(path):
                if filehash != itercache.next():
                    clean = False
                    break
        except StopIteration:
            # the cached and current pull states have a different size
            clean = False
        if clean:
            try:
                itercache.next()
                # the cached and current pull states have a different size
                clean = False
            except StopIteration:
                pass
        lock.release()
        return clean

    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to detect when there are changes that may
        require a push to a given remote path.'''
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        yield '# %s\n' % _expandedabspath(remotepath)
        for relname in filelist:
            absname = os.path.normpath(self._repo.join(relname))
            yield '%s = %s\n' % (relname, _calcfilehash(absname))

    def _getstorehashcachepath(self, remotepath):
        '''get a unique path for the store hash cache'''
        return self._repo.join(os.path.join(
            'cache', 'storehash', _getstorehashcachename(remotepath)))

    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = self._getstorehashcachepath(remotepath)
        if not os.path.exists(cachefile):
            return ''
        fd = open(cachefile, 'r')
        pullstate = fd.readlines()
        fd.close()
        return pullstate

    def _cachestorehash(self, remotepath):
        '''cache the current store hash

        Each remote repo requires its own store hash cache, because a subrepo
        store may be "clean" versus a given remote repo, but not versus another
        '''
        cachefile = self._getstorehashcachepath(remotepath)
        lock = self._repo.lock()
        storehash = list(self._calcstorehash(remotepath))
        cachedir = os.path.dirname(cachefile)
        if not os.path.exists(cachedir):
            util.makedirs(cachedir, notindexed=True)
        fd = open(cachefile, 'w')
        fd.writelines(storehash)
        fd.close()
        lock.release()

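# Editor's illustration of the cache file written above, e.g.
# .hg/cache/storehash/9a273e20c246 (file name and hashes hypothetical):
#
#     # /home/user/src/shared
#     bookmarks = da39a3ee5e6b4b0d3255bfef95601890afd80709
#     store/phaseroots = da39a3ee5e6b4b0d3255bfef95601890afd80709
#     store/00changelog.i = 3912c3c1d059d8a02a9d8bbf5f1a13e9a7cdf74e
#
# storeclean(path) then compares a freshly generated _calcstorehash
# against these lines, entry by entry.
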
545 @annotatesubrepoerror
546 @annotatesubrepoerror
546 def _initrepo(self, parentrepo, source, create):
547 def _initrepo(self, parentrepo, source, create):
547 self._repo._subparent = parentrepo
548 self._repo._subparent = parentrepo
548 self._repo._subsource = source
549 self._repo._subsource = source
549
550
550 if create:
551 if create:
551 fp = self._repo.opener("hgrc", "w", text=True)
552 fp = self._repo.opener("hgrc", "w", text=True)
552 fp.write('[paths]\n')
553 fp.write('[paths]\n')
553
554
554 def addpathconfig(key, value):
555 def addpathconfig(key, value):
555 if value:
556 if value:
556 fp.write('%s = %s\n' % (key, value))
557 fp.write('%s = %s\n' % (key, value))
557 self._repo.ui.setconfig('paths', key, value)
558 self._repo.ui.setconfig('paths', key, value)
558
559
559 defpath = _abssource(self._repo, abort=False)
560 defpath = _abssource(self._repo, abort=False)
560 defpushpath = _abssource(self._repo, True, abort=False)
561 defpushpath = _abssource(self._repo, True, abort=False)
561 addpathconfig('default', defpath)
562 addpathconfig('default', defpath)
562 if defpath != defpushpath:
563 if defpath != defpushpath:
563 addpathconfig('default-push', defpushpath)
564 addpathconfig('default-push', defpushpath)
564 fp.close()
565 fp.close()
565
566
566 @annotatesubrepoerror
567 @annotatesubrepoerror
567 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
568 def add(self, ui, match, dryrun, listsubrepos, prefix, explicitonly):
568 return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
569 return cmdutil.add(ui, self._repo, match, dryrun, listsubrepos,
569 os.path.join(prefix, self._path), explicitonly)
570 os.path.join(prefix, self._path), explicitonly)
570
571
571 @annotatesubrepoerror
572 @annotatesubrepoerror
572 def status(self, rev2, **opts):
573 def status(self, rev2, **opts):
573 try:
574 try:
574 rev1 = self._state[1]
575 rev1 = self._state[1]
575 ctx1 = self._repo[rev1]
576 ctx1 = self._repo[rev1]
576 ctx2 = self._repo[rev2]
577 ctx2 = self._repo[rev2]
577 return self._repo.status(ctx1, ctx2, **opts)
578 return self._repo.status(ctx1, ctx2, **opts)
578 except error.RepoLookupError, inst:
579 except error.RepoLookupError, inst:
579 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
580 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
580 % (inst, subrelpath(self)))
581 % (inst, subrelpath(self)))
581 return [], [], [], [], [], [], []
582 return [], [], [], [], [], [], []
582
583
583 @annotatesubrepoerror
584 @annotatesubrepoerror
584 def diff(self, ui, diffopts, node2, match, prefix, **opts):
585 def diff(self, ui, diffopts, node2, match, prefix, **opts):
585 try:
586 try:
586 node1 = node.bin(self._state[1])
587 node1 = node.bin(self._state[1])
587 # We currently expect node2 to come from substate and be
588 # We currently expect node2 to come from substate and be
588 # in hex format
589 # in hex format
589 if node2 is not None:
590 if node2 is not None:
590 node2 = node.bin(node2)
591 node2 = node.bin(node2)
591 cmdutil.diffordiffstat(ui, self._repo, diffopts,
592 cmdutil.diffordiffstat(ui, self._repo, diffopts,
592 node1, node2, match,
593 node1, node2, match,
593 prefix=posixpath.join(prefix, self._path),
594 prefix=posixpath.join(prefix, self._path),
594 listsubrepos=True, **opts)
595 listsubrepos=True, **opts)
595 except error.RepoLookupError, inst:
596 except error.RepoLookupError, inst:
596 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
597 self._repo.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
597 % (inst, subrelpath(self)))
598 % (inst, subrelpath(self)))
598
599
599 @annotatesubrepoerror
600 @annotatesubrepoerror
600 def archive(self, ui, archiver, prefix, match=None):
601 def archive(self, ui, archiver, prefix, match=None):
601 self._get(self._state + ('hg',))
602 self._get(self._state + ('hg',))
602 total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
        total = abstractsubrepo.archive(self, ui, archiver, prefix, match)
        rev = self._state[1]
        ctx = self._repo[rev]
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath)
            submatch = matchmod.narrowmatcher(subpath, match)
            total += s.archive(
                ui, archiver, os.path.join(prefix, self._path), submatch)
        return total

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        r = self._state[1]
        if r == '' and not ignoreupdate: # no state recorded
            return True
        w = self._repo[None]
        if r != w.p1().hex() and not ignoreupdate:
            # different version checked out
            return True
        return w.dirty() # working directory changed

    def basestate(self):
        return self._repo['.'].hex()

    def checknested(self, path):
        return self._repo._checknested(self._repo.wjoin(path))

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self._repo.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)

    @annotatesubrepoerror
    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self._repo.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        source, revision, kind = state
        if revision not in self._repo:
            self._repo._subsource = source
            srcurl = _abssource(self._repo)
            other = hg.peer(self._repo, {}, srcurl)
            if len(self._repo) == 0:
                self._repo.ui.status(_('cloning subrepo %s from %s\n')
                                     % (subrelpath(self), srcurl))
                parentrepo = self._repo._subparent
                shutil.rmtree(self._repo.path)
                other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                         other, self._repo.root,
                                         update=False)
                self._repo = cloned.local()
                self._initrepo(parentrepo, source, create=True)
                self._cachestorehash(srcurl)
            else:
                self._repo.ui.status(_('pulling subrepo %s from %s\n')
                                     % (subrelpath(self), srcurl))
                cleansub = self.storeclean(srcurl)
                remotebookmarks = other.listkeys('bookmarks')
                self._repo.pull(other)
                bookmarks.updatefromremote(self._repo.ui, self._repo,
                                           remotebookmarks, srcurl)
                if cleansub:
                    # keep the repo clean after pull
                    self._cachestorehash(srcurl)

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        self._get(state)
        source, revision, kind = state
        self._repo.ui.debug("getting subrepo %s\n" % self._path)
        hg.updaterepo(self._repo, revision, overwrite)

    @annotatesubrepoerror
    def merge(self, state):
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            if anc == cur and dst.branch() == cur.branch():
                self._repo.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self._repo.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                self._repo.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                if _updateprompt(self._repo.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()

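    # A sketch of the decision merge() makes above (revision names are
    # illustrative): with working parent `cur`, target `dst`, and their
    # common ancestor `anc`,
    #   anc == cur  -> dst descends from cur: a plain update suffices
    #   anc == dst  -> cur already contains dst: nothing to do
    #   otherwise   -> the histories diverged: a real `hg merge` is needed
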
    @annotatesubrepoerror
    def push(self, opts):
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                self._repo.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self._repo.ui.status(_('pushing subrepo %s to %s\n') %
                             (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = self._repo.push(other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res

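    # Return-value convention assumed by the depth-first loop in push()
    # above: 0/False means a subrepo push failed and aborts the parent
    # push, None means the store was already clean and nothing was pushed,
    # and any other value is the result of self._repo.push().
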
    @annotatesubrepoerror
    def outgoing(self, ui, dest, opts):
        return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)

    @annotatesubrepoerror
    def incoming(self, ui, source, opts):
        return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)

    @annotatesubrepoerror
    def files(self):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.manifest()

    def filedata(self, name):
        rev = self._state[1]
        return self._repo[rev][name].data()

    def fileflags(self, name):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.flags(name)

    def walk(self, match):
        ctx = self._repo[None]
        return ctx.walk(match)

    @annotatesubrepoerror
    def forget(self, ui, match, prefix):
        return cmdutil.forget(ui, self._repo, match,
                              os.path.join(prefix, self._path), True)

    @annotatesubrepoerror
    def revert(self, ui, substate, *pats, **opts):
        # reverting a subrepo is a two-step process:
        # 1. if no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            pats = []
            if not opts.get('all'):
                pats = ['set:modified()']
            self.filerevert(ui, *pats, **opts)

        # Update the repo to the revision specified in the given substate
        self.get(substate, overwrite=True)

    def filerevert(self, ui, *pats, **opts):
        ctx = self._repo[opts['rev']]
        parents = self._repo.dirstate.parents()
        if opts.get('all'):
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(ui, self._repo, ctx, parents, *pats, **opts)

class svnsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        self._path = path
        self._state = state
        self._ctx = ctx
        self._ui = ctx._repo.ui
        self._exe = util.findexe('svn')
        if not self._exe:
            raise util.Abort(_("'svn' executable not found for subrepo '%s'")
                             % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        cmd = [self._exe]
        extrakw = {}
        if not self._ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = os.path.join(self._ctx._repo.origroot, self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise util.Abort(stderr or 'exited with code %d' % p.returncode)
            if stderr:
                self._ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(r'^(\d+)\.(\d+)', output)
        if not m:
            raise util.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

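    # For illustration: `svn --version --quiet` prints a bare version
    # string such as "1.7.14" (example value), which _svnversion above
    # reduces to the (major, minor) tuple (1, 7) -- enough for checks
    # like `self._svnversion >= (1, 5)` in get() below.
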
    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

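    # For illustration (example revision numbers): a working copy updated
    # to r10 whose last change happened in r8 yields `svn info --xml`
    # with <entry revision="10"> containing <commit revision="8">, so
    # _wcrevs() above returns the string pair ('8', '10').
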
    def _wcrev(self):
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    def dirty(self, ignoreupdate=False):
        if not self._wcchanged()[0]:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same as rev. We would
            # like to take lastrev, but we do not know if the subrepo
            # URL exists at lastrev. Test it and fall back to rev if
            # it is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise util.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise util.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self._ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn's. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise util.Abort(_('failed to commit svn changes'))
            raise util.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self._ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

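    # For illustration (example revision number): a successful `svn commit`
    # ends its output with a line like "Committed revision 42.", which the
    # regex above captures so the working copy can then be updated to r42.
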
    @annotatesubrepoerror
    def remove(self):
        if self.dirty():
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._path)
            return
        self._ui.note(_('removing subrepo %s\n') % self._path)

        def onerror(function, path, excinfo):
            if function is not os.remove:
                raise
            # read-only files cannot be unlinked under Windows
            s = os.stat(path)
            if (s.st_mode & stat.S_IWRITE) != 0:
                raise
            os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
            os.remove(path)

        path = self._ctx._repo.wjoin(self._path)
        shutil.rmtree(path, onerror=onerror)
        try:
            os.removedirs(os.path.dirname(path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise util.Abort((status or err).splitlines()[-1])
        self._ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self._ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name):
        return self._svncommand(['cat'], name)[0]


class gitsubrepo(abstractsubrepo):
    def __init__(self, ctx, path, state):
        self._state = state
        self._ctx = ctx
        self._path = path
        self._relpath = os.path.join(reporelpath(ctx._repo), path)
        self._abspath = ctx._repo.wjoin(path)
        self._subparent = ctx._repo
        self._ui = ctx._repo.ui
        self._ensuregit()

    def _ensuregit(self):
        try:
            self._gitexecutable = 'git'
            out, err = self._gitnodir(['--version'])
        except OSError, e:
            if e.errno != 2 or os.name != 'nt':
                raise
            self._gitexecutable = 'git.cmd'
            out, err = self._gitnodir(['--version'])
        m = re.search(r'^git version (\d+)\.(\d+)\.(\d+)', out)
        if not m:
            self._ui.warn(_('cannot retrieve git version'))
            return
        # convert all components to int so the tuple comparisons below are
        # numeric rather than mixed str/int
        version = (int(m.group(1)), int(m.group(2)), int(m.group(3)))
        # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
        # despite the docstring comment. For now, error on 1.4.0, warn on
        # 1.5.0 but attempt to continue.
        if version < (1, 5, 0):
            raise util.Abort(_('git subrepo requires git 1.6.0 or later'))
        elif version < (1, 6, 0):
            self._ui.warn(_('git subrepo requires git 1.6.0 or later'))

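    # For illustration (example version): `git --version` prints something
    # like "git version 1.8.3.4"; the regex above keeps the first three
    # components, so the comparisons against (1, 5, 0) and (1, 6, 0) see
    # the integer tuple (1, 8, 3).
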
    def _gitcommand(self, commands, env=None, stream=False):
        return self._gitdir(commands, env=env, stream=stream)[0]

    def _gitdir(self, commands, env=None, stream=False):
        return self._gitnodir(commands, env=env, stream=stream,
                              cwd=self._abspath)

    def _gitnodir(self, commands, env=None, stream=False, cwd=None):
        """Calls the git command

        The method tries to call the git command. Versions prior to 1.6.0
        are not supported and will very probably fail.
        """
        self._ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
        # unless ui.quiet is set, print git's stderr,
        # which is mostly progress and useful info
        errpipe = None
        if self._ui.quiet:
            errpipe = open(os.devnull, 'w')
        p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
                             cwd=cwd, env=env, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=errpipe)
        if stream:
            return p.stdout, None

        retdata = p.stdout.read().strip()
        # wait for the child to exit to avoid race condition.
        p.wait()

        if p.returncode != 0 and p.returncode != 1:
            # there are certain error codes that are ok
            command = commands[0]
            if command in ('cat-file', 'symbolic-ref'):
                return retdata, p.returncode
            # for all others, abort
            raise util.Abort('git %s error %d in %s' %
                             (command, p.returncode, self._relpath))

        return retdata, p.returncode

    def _gitmissing(self):
        return not os.path.exists(os.path.join(self._abspath, '.git'))

    def _gitstate(self):
        return self._gitcommand(['rev-parse', 'HEAD'])

    def _gitcurrentbranch(self):
        current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
        if err:
            current = None
        return current

    def _gitremote(self, remote):
        out = self._gitcommand(['remote', 'show', '-n', remote])
        line = out.split('\n')[1]
        i = line.index('URL: ') + len('URL: ')
        return line[i:]

    def _githavelocally(self, revision):
        out, code = self._gitdir(['cat-file', '-e', revision])
        return code == 0

    def _gitisancestor(self, r1, r2):
        base = self._gitcommand(['merge-base', r1, r2])
        return base == r1

    def _gitisbare(self):
        return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'

    def _gitupdatestat(self):
        """This must be run before git diff-index.
        diff-index only looks at changes to file stat;
        this command looks at file contents and updates the stat."""
        self._gitcommand(['update-index', '-q', '--refresh'])

    def _gitbranchmap(self):
        '''returns 2 things:
        a map from git branch to revision
        a map from revision to branches'''
        branch2rev = {}
        rev2branch = {}

        out = self._gitcommand(['for-each-ref', '--format',
                                '%(objectname) %(refname)'])
        for line in out.split('\n'):
            revision, ref = line.split(' ')
            if (not ref.startswith('refs/heads/') and
                not ref.startswith('refs/remotes/')):
                continue
            if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
                continue # ignore remote/HEAD redirects
            branch2rev[ref] = revision
            rev2branch.setdefault(revision, []).append(ref)
        return branch2rev, rev2branch

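    # For illustration (hypothetical hash): a `for-each-ref` line such as
    #   "1234abcd refs/heads/master"
    # makes _gitbranchmap() above return
    #   branch2rev = {'refs/heads/master': '1234abcd'}
    #   rev2branch = {'1234abcd': ['refs/heads/master']}
    # where a revision carried by several branches maps to all of them.
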
    def _gittracking(self, branches):
        'return map of remote branch to local tracking branch'
        # assumes no more than one local tracking branch for each remote
        tracking = {}
        for b in branches:
            if b.startswith('refs/remotes/'):
                continue
            bname = b.split('/', 2)[2]
            remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
            if remote:
                ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
                tracking['refs/remotes/%s/%s' %
                         (remote, ref.split('/', 2)[2])] = b
        return tracking

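    # For illustration: with `branch.master.remote = origin` and
    # `branch.master.merge = refs/heads/master` in the git config,
    # _gittracking() above maps
    #   'refs/remotes/origin/master' -> 'refs/heads/master'
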
    def _abssource(self, source):
        if '://' not in source:
            # recognize the scp syntax as an absolute source
            colon = source.find(':')
            if colon != -1 and '/' not in source[:colon]:
                return source
        self._subsource = source
        return _abssource(self)

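    # For illustration (hypothetical URL): 'git@example.com:proj.git' has
    # a colon but no '/' before it, so the scp-syntax test above treats it
    # as already absolute and returns it unchanged.
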
    def _fetch(self, source, revision):
        if self._gitmissing():
            source = self._abssource(source)
            self._ui.status(_('cloning subrepo %s from %s\n') %
                            (self._relpath, source))
            self._gitnodir(['clone', source, self._abspath])
        if self._githavelocally(revision):
            return
        self._ui.status(_('pulling subrepo %s from %s\n') %
                        (self._relpath, self._gitremote('origin')))
        # try only origin: the originally cloned repo
        self._gitcommand(['fetch'])
        if not self._githavelocally(revision):
            raise util.Abort(_("revision %s does not exist in subrepo %s\n") %
                             (revision, self._relpath))

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        if self._gitmissing():
            return self._state[1] != ''
        if self._gitisbare():
            return True
        if not ignoreupdate and self._state[1] != self._gitstate():
            # different version checked out
            return True
        # check for staged changes or modified files; ignore untracked files
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
        return code == 1

    def basestate(self):
        return self._gitstate()

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        source, revision, kind = state
        if not revision:
            self.remove()
            return
        self._fetch(source, revision)
        # if the repo was set to be bare, unbare it
        if self._gitisbare():
            self._gitcommand(['config', 'core.bare', 'false'])
            if self._gitstate() == revision:
                self._gitcommand(['reset', '--hard', 'HEAD'])
                return
        elif self._gitstate() == revision:
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # reset --hard will otherwise throw away files added for commit,
                # not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
            self._gitcommand(['reset', '--hard', 'HEAD'])
            return
        branch2rev, rev2branch = self._gitbranchmap()

        def checkout(args):
            cmd = ['checkout']
            if overwrite:
                # first reset the index to unmark new files for commit, because
                # the -f option will otherwise throw away files added for
                # commit, not just unmark them.
                self._gitcommand(['reset', 'HEAD'])
                cmd.append('-f')
            self._gitcommand(cmd + args)

        def rawcheckout():
            # no branch to checkout, check it out with no branch
            self._ui.warn(_('checking out detached HEAD in subrepo %s\n') %
                          self._relpath)
            self._ui.warn(_('check out a git branch if you intend '
                            'to make changes\n'))
            checkout(['-q', revision])

        if revision not in rev2branch:
            rawcheckout()
            return
        branches = rev2branch[revision]
        firstlocalbranch = None
        for b in branches:
            if b == 'refs/heads/master':
                # master trumps all other branches
                checkout(['refs/heads/master'])
                return
            if not firstlocalbranch and not b.startswith('refs/remotes/'):
                firstlocalbranch = b
        if firstlocalbranch:
            checkout([firstlocalbranch])
            return

        tracking = self._gittracking(branch2rev.keys())
        # choose a remote branch already tracked if possible
        remote = branches[0]
        if remote not in tracking:
            for b in branches:
                if b in tracking:
                    remote = b
                    break

        if remote not in tracking:
            # create a new local tracking branch
            local = remote.split('/', 3)[3]
            checkout(['-b', local, remote])
        elif self._gitisancestor(branch2rev[tracking[remote]], remote):
            # When updating to a tracked remote branch,
            # if the local tracking branch is downstream of it,
            # a normal `git pull` would have performed a "fast-forward merge"
            # which is equivalent to updating the local branch to the remote.
            # Since we are only looking at branching at update, we need to
            # detect this situation and perform this action lazily.
            if tracking[remote] != self._gitcurrentbranch():
                checkout([tracking[remote]])
            self._gitcommand(['merge', '--ff', remote])
        else:
            # a real merge would be required, just checkout the revision
            rawcheckout()

    @annotatesubrepoerror
    def commit(self, text, user, date):
        if self._gitmissing():
            raise util.Abort(_("subrepo %s is missing") % self._relpath)
        cmd = ['commit', '-a', '-m', text]
        env = os.environ.copy()
        if user:
            cmd += ['--author', user]
        if date:
            # git's date parser silently ignores when seconds < 1e9
            # convert to ISO8601
            env['GIT_AUTHOR_DATE'] = util.datestr(date,
                                                  '%Y-%m-%dT%H:%M:%S %1%2')
        self._gitcommand(cmd, env=env)
        # make sure commit works, otherwise HEAD might not exist under
        # certain circumstances
        return self._gitstate()

    @annotatesubrepoerror
    def merge(self, state):
        source, revision, kind = state
        self._fetch(source, revision)
        base = self._gitcommand(['merge-base', revision, self._state[1]])
        self._gitupdatestat()
        out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

        def mergefunc():
            if base == revision:
                self.get(state) # fast forward merge
            elif base != self._state[1]:
                self._gitcommand(['merge', '--no-commit', revision])

        if self.dirty():
            if self._gitstate() != revision:
                dirty = self._gitstate() == self._state[1] or code != 0
                if _updateprompt(self._ui, self, dirty,
                                 self._state[1][:7], revision[:7]):
                    mergefunc()
        else:
            mergefunc()

    @annotatesubrepoerror
    def push(self, opts):
        force = opts.get('force')

        if not self._state[1]:
            return True
        if self._gitmissing():
            raise util.Abort(_("subrepo %s is missing") % self._relpath)
        # if a branch in origin contains the revision, nothing to do
        branch2rev, rev2branch = self._gitbranchmap()
        if self._state[1] in rev2branch:
            for b in rev2branch[self._state[1]]:
                if b.startswith('refs/remotes/origin/'):
                    return True
        for b, revision in branch2rev.iteritems():
            if b.startswith('refs/remotes/origin/'):
                if self._gitisancestor(self._state[1], revision):
                    return True
        # otherwise, try to push the currently checked out branch
        cmd = ['push']
        if force:
            cmd.append('--force')

        current = self._gitcurrentbranch()
        if current:
            # determine if the current branch is even useful
            if not self._gitisancestor(self._state[1], current):
                self._ui.warn(_('unrelated git branch checked out '
                                'in subrepo %s\n') % self._relpath)
                return False
            self._ui.status(_('pushing branch %s of subrepo %s\n') %
                            (current.split('/', 2)[2], self._relpath))
            self._gitcommand(cmd + ['origin', current])
            return True
        else:
            self._ui.warn(_('no branch checked out in subrepo %s\n'
                            'cannot push revision %s\n') %
                          (self._relpath, self._state[1]))
            return False

    @annotatesubrepoerror
    def remove(self):
        if self._gitmissing():
            return
        if self.dirty():
            self._ui.warn(_('not removing repo %s because '
                            'it has changes.\n') % self._relpath)
            return
        # we can't fully delete the repository as it may contain
        # local-only history
        self._ui.note(_('removing subrepo %s\n') % self._relpath)
        self._gitcommand(['config', 'core.bare', 'true'])
        for f in os.listdir(self._abspath):
            if f == '.git':
                continue
            path = os.path.join(self._abspath, f)
            if os.path.isdir(path) and not os.path.islink(path):
                shutil.rmtree(path)
            else:
                os.remove(path)

    def archive(self, ui, archiver, prefix, match=None):
        total = 0
        source, revision = self._state
        if not revision:
            return total
        self._fetch(source, revision)

        # Parse git's native archive command.
        # This should be much faster than manually traversing the trees
        # and objects with many subprocess calls.
        tarstream = self._gitcommand(['archive', revision], stream=True)
        tar = tarfile.open(fileobj=tarstream, mode='r|')
        relpath = subrelpath(self)
        ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
        for i, info in enumerate(tar):
            if info.isdir():
                continue
            if match and not match(info.name):
                continue
            if info.issym():
                data = info.linkname
            else:
                data = tar.extractfile(info).read()
            archiver.addfile(os.path.join(prefix, self._path, info.name),
                             info.mode, info.issym(), data)
            total += 1
            ui.progress(_('archiving (%s)') % relpath, i + 1,
                        unit=_('files'))
        ui.progress(_('archiving (%s)') % relpath, None)
        return total


    @annotatesubrepoerror
    def status(self, rev2, **opts):
        rev1 = self._state[1]
        if self._gitmissing() or not rev1:
            # if the repo is missing, return no results
            return [], [], [], [], [], [], []
        modified, added, removed = [], [], []
        self._gitupdatestat()
        if rev2:
            command = ['diff-tree', rev1, rev2]
        else:
            command = ['diff-index', rev1]
        out = self._gitcommand(command)
        for line in out.split('\n'):
            tab = line.find('\t')
            if tab == -1:
                continue
            status, f = line[tab - 1], line[tab + 1:]
            if status == 'M':
                modified.append(f)
            elif status == 'A':
                added.append(f)
            elif status == 'D':
                removed.append(f)

        deleted = unknown = ignored = clean = []
        return modified, added, removed, deleted, unknown, ignored, clean

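    # For illustration (hypothetical hashes): `git diff-index rev1` emits
    # lines like
    #   ":100644 100644 <sha1> <sha2> M\tsome/file.py"
    # so the character just before the tab is the one-letter status code
    # that the loop above sorts into modified/added/removed.
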
types = {
    'hg': hgsubrepo,
    'svn': svnsubrepo,
    'git': gitsubrepo,
}
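
# A minimal sketch of how the `types` table is consulted; the factory
# (the `subrepo()` helper used earlier in this module) is assumed to look
# roughly like this, with names here being illustrative:
#
#   source, revision, kind = ctx.substate[path]
#   sub = types[kind](ctx, path, (source, revision))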