Merge with stable
Matt Mackall
r10296:cade47dc (merge, default branch)
--- a/mercurial/config.py
+++ b/mercurial/config.py
@@ -1,144 +1,144 @@
 # config.py - configuration parsing for Mercurial
 #
 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from i18n import _
 import error
 import re, os

 class sortdict(dict):
     'a simple sorted dictionary'
     def __init__(self, data=None):
         self._list = []
         if data:
             self.update(data)
     def copy(self):
         return sortdict(self)
     def __setitem__(self, key, val):
         if key in self:
             self._list.remove(key)
         self._list.append(key)
         dict.__setitem__(self, key, val)
     def __iter__(self):
         return self._list.__iter__()
     def update(self, src):
         for k in src:
             self[k] = src[k]
     def items(self):
         return [(k, self[k]) for k in self._list]
     def __delitem__(self, key):
         dict.__delitem__(self, key)
         self._list.remove(key)

 class config(object):
     def __init__(self, data=None):
         self._data = {}
         self._source = {}
         if data:
             for k in data._data:
                 self._data[k] = data[k].copy()
             self._source = data._source.copy()
     def copy(self):
         return config(self)
     def __contains__(self, section):
         return section in self._data
     def __getitem__(self, section):
         return self._data.get(section, {})
     def __iter__(self):
         for d in self.sections():
             yield d
     def update(self, src):
         for s in src:
             if s not in self:
                 self._data[s] = sortdict()
             self._data[s].update(src._data[s])
         self._source.update(src._source)
     def get(self, section, item, default=None):
         return self._data.get(section, {}).get(item, default)
     def source(self, section, item):
         return self._source.get((section, item), "")
     def sections(self):
         return sorted(self._data.keys())
     def items(self, section):
         return self._data.get(section, {}).items()
     def set(self, section, item, value, source=""):
         if section not in self:
             self._data[section] = sortdict()
         self._data[section][item] = value
         self._source[(section, item)] = source

     def parse(self, src, data, sections=None, remap=None, include=None):
         sectionre = re.compile(r'\[([^\[]+)\]')
         itemre = re.compile(r'([^=\s][^=]*?)\s*=\s*(.*\S|)')
-        contre = re.compile(r'\s+(\S.*\S)')
+        contre = re.compile(r'\s+(\S|\S.*\S)\s*$')
         emptyre = re.compile(r'(;|#|\s*$)')
         unsetre = re.compile(r'%unset\s+(\S+)')
-        includere = re.compile(r'%include\s+(\S.*\S)')
+        includere = re.compile(r'%include\s+(\S|\S.*\S)\s*$')
         section = ""
         item = None
         line = 0
         cont = False

         for l in data.splitlines(True):
             line += 1
             if cont:
                 m = contre.match(l)
                 if m:
                     if sections and section not in sections:
                         continue
                     v = self.get(section, item) + "\n" + m.group(1)
                     self.set(section, item, v, "%s:%d" % (src, line))
                     continue
                 item = None
                 cont = False
             m = includere.match(l)
             if m:
                 inc = m.group(1)
                 base = os.path.dirname(src)
                 inc = os.path.normpath(os.path.join(base, inc))
                 if include:
                     try:
                         include(inc, remap=remap, sections=sections)
                     except IOError, inst:
                         msg = _("config error at %s:%d: "
                                 "cannot include %s (%s)") \
                                 % (src, line, inc, inst.strerror)
                         raise error.ConfigError(msg)
                 continue
             if emptyre.match(l):
                 continue
             m = sectionre.match(l)
             if m:
                 section = m.group(1)
                 if remap:
                     section = remap.get(section, section)
                 if section not in self:
                     self._data[section] = sortdict()
                 continue
             m = itemre.match(l)
             if m:
                 item = m.group(1)
                 cont = True
                 if sections and section not in sections:
                     continue
                 self.set(section, item, m.group(2), "%s:%d" % (src, line))
                 continue
             m = unsetre.match(l)
             if m:
                 name = m.group(1)
                 if sections and section not in sections:
                     continue
                 if self.get(section, name) != None:
                     del self._data[section][name]
                 continue

             raise error.ConfigError(_("config error at %s:%d: '%s'")
                                     % (src, line, l.rstrip()))

     def read(self, path, fp=None, sections=None, remap=None):
         if not fp:
             fp = open(path)
         self.parse(path, fp.read(), sections, remap, self.read)
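
The two regex changes above are easiest to see against the continuation lines the updated test feeds the parser. The standalone sketch below is illustrative only and not part of the changeset; the names old_contre and new_contre are mine. It shows that the old pattern \s+(\S.*\S) requires at least two non-blank characters and so rejects a single-character continuation line, while the new anchored alternative also trims trailing blanks; the %include pattern gets the same treatment for the same reason.

# Illustrative comparison (not Mercurial code) of the old and new
# continuation-line patterns used by config.parse().  Python 2 syntax,
# matching the vintage of the surrounding code.
import re

old_contre = re.compile(r'\s+(\S.*\S)')
new_contre = re.compile(r'\s+(\S|\S.*\S)\s*$')

for line in [' b\n', ' c \n', ' de\n', ' fg \n']:
    old_m = old_contre.match(line)
    new_m = new_contre.match(line)
    print repr(line), 'old:', old_m and repr(old_m.group(1)), \
        'new:', new_m and repr(new_m.group(1))

# ' b\n'   -> old: no match, new: 'b'   (single-character continuation accepted)
# ' c \n'  -> old: no match, new: 'c'   (trailing blank no longer defeats \S.*\S)
# ' de\n'  -> old: 'de',     new: 'de'
# ' fg \n' -> old: 'fg',     new: 'fg'  (trailing blank stripped by the \s*$ anchor)
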
--- a/mercurial/copies.py
+++ b/mercurial/copies.py
@@ -1,253 +1,253 @@
 # copies.py - copy detection for Mercurial
 #
 # Copyright 2008 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from i18n import _
 import util
 import heapq

 def _nonoverlap(d1, d2, d3):
     "Return list of elements in d1 not in d2 or d3"
     return sorted([d for d in d1 if d not in d3 and d not in d2])

 def _dirname(f):
     s = f.rfind("/")
     if s == -1:
         return ""
     return f[:s]

 def _dirs(files):
     d = set()
     for f in files:
         f = _dirname(f)
         while f not in d:
             d.add(f)
             f = _dirname(f)
     return d

 def _findlimit(repo, a, b):
     """Find the earliest revision that's an ancestor of a or b but not both,
     None if no such revision exists.
     """
     # basic idea:
     # - mark a and b with different sides
     # - if a parent's children are all on the same side, the parent is
     #   on that side, otherwise it is on no side
     # - walk the graph in topological order with the help of a heap;
     #   - add unseen parents to side map
     #   - clear side of any parent that has children on different sides
     #   - track number of interesting revs that might still be on a side
     #   - track the lowest interesting rev seen
     #   - quit when interesting revs is zero

     cl = repo.changelog
     working = len(cl) # pseudo rev for the working directory
     if a is None:
         a = working
     if b is None:
         b = working

     side = {a: -1, b: 1}
     visit = [-a, -b]
     heapq.heapify(visit)
     interesting = len(visit)
     hascommonancestor = False
     limit = working

     while interesting:
         r = -heapq.heappop(visit)
         if r == working:
             parents = [cl.rev(p) for p in repo.dirstate.parents()]
         else:
             parents = cl.parentrevs(r)
         for p in parents:
             if p < 0:
                 continue
             if p not in side:
                 # first time we see p; add it to visit
                 side[p] = side[r]
                 if side[p]:
                     interesting += 1
                 heapq.heappush(visit, -p)
             elif side[p] and side[p] != side[r]:
                 # p was interesting but now we know better
                 side[p] = 0
                 interesting -= 1
                 hascommonancestor = True
         if side[r]:
             limit = r # lowest rev visited
             interesting -= 1

     if not hascommonancestor:
         return None
     return limit

 def copies(repo, c1, c2, ca, checkdirs=False):
     """
     Find moves and copies between context c1 and c2
     """
     # avoid silly behavior for update from empty dir
     if not c1 or not c2 or c1 == c2:
         return {}, {}

     # avoid silly behavior for parent -> working dir
     if c2.node() is None and c1.node() == repo.dirstate.parents()[0]:
         return repo.dirstate.copies(), {}

     limit = _findlimit(repo, c1.rev(), c2.rev())
     if limit is None:
         # no common ancestor, no copies
         return {}, {}
     m1 = c1.manifest()
     m2 = c2.manifest()
     ma = ca.manifest()

     def makectx(f, n):
         if len(n) != 20: # in a working context?
             if c1.rev() is None:
                 return c1.filectx(f)
             return c2.filectx(f)
         return repo.filectx(f, fileid=n)

     ctx = util.lrucachefunc(makectx)
     copy = {}
     fullcopy = {}
     diverge = {}

     def related(f1, f2, limit):
         g1, g2 = f1.ancestors(), f2.ancestors()
         try:
             while 1:
                 f1r, f2r = f1.rev(), f2.rev()
                 if f1r > f2r:
                     f1 = g1.next()
                 elif f2r > f1r:
                     f2 = g2.next()
                 elif f1 == f2:
                     return f1 # a match
                 elif f1r == f2r or f1r < limit or f2r < limit:
                     return False # copy no longer relevant
         except StopIteration:
             return False

     def checkcopies(f, m1, m2):
         '''check possible copies of f from m1 to m2'''
         of = None
         seen = set([f])
         for oc in ctx(f, m1[f]).ancestors():
             ocr = oc.rev()
             of = oc.path()
             if of in seen:
                 # check limit late - grab last rename before
                 if ocr < limit:
                     break
                 continue
             seen.add(of)

             fullcopy[f] = of # remember for dir rename detection
             if of not in m2:
                 continue # no match, keep looking
             if m2[of] == ma.get(of):
                 break # no merge needed, quit early
             c2 = ctx(of, m2[of])
             cr = related(oc, c2, ca.rev())
-            if of == f or of == c2.path(): # non-divergent
+            if cr and (cr.path() == f or cr.path() == c2.path()): # non-divergent
                 copy[f] = of
                 of = None
                 break

         if of in ma:
             diverge.setdefault(of, []).append(f)

     repo.ui.debug("  searching for copies back to rev %d\n" % limit)

     u1 = _nonoverlap(m1, m2, ma)
     u2 = _nonoverlap(m2, m1, ma)

     if u1:
         repo.ui.debug("  unmatched files in local:\n   %s\n"
                       % "\n   ".join(u1))
     if u2:
         repo.ui.debug("  unmatched files in other:\n   %s\n"
                       % "\n   ".join(u2))

     for f in u1:
         checkcopies(f, m1, m2)
     for f in u2:
         checkcopies(f, m2, m1)

     diverge2 = set()
     for of, fl in diverge.items():
         if len(fl) == 1:
             del diverge[of] # not actually divergent
         else:
             diverge2.update(fl) # reverse map for below

     if fullcopy:
         repo.ui.debug("  all copies found (* = to merge, ! = divergent):\n")
         for f in fullcopy:
             note = ""
             if f in copy:
                 note += "*"
             if f in diverge2:
                 note += "!"
             repo.ui.debug("   %s -> %s %s\n" % (f, fullcopy[f], note))
     del diverge2

     if not fullcopy or not checkdirs:
         return copy, diverge

     repo.ui.debug("  checking for directory renames\n")

     # generate a directory move map
     d1, d2 = _dirs(m1), _dirs(m2)
     invalid = set()
     dirmove = {}

     # examine each file copy for a potential directory move, which is
     # when all the files in a directory are moved to a new directory
     for dst, src in fullcopy.iteritems():
         dsrc, ddst = _dirname(src), _dirname(dst)
         if dsrc in invalid:
             # already seen to be uninteresting
             continue
         elif dsrc in d1 and ddst in d1:
             # directory wasn't entirely moved locally
             invalid.add(dsrc)
         elif dsrc in d2 and ddst in d2:
             # directory wasn't entirely moved remotely
             invalid.add(dsrc)
         elif dsrc in dirmove and dirmove[dsrc] != ddst:
             # files from the same directory moved to two different places
             invalid.add(dsrc)
         else:
             # looks good so far
             dirmove[dsrc + "/"] = ddst + "/"

     for i in invalid:
         if i in dirmove:
             del dirmove[i]
     del d1, d2, invalid

     if not dirmove:
         return copy, diverge

     for d in dirmove:
         repo.ui.debug("  dir %s -> %s\n" % (d, dirmove[d]))

     # check unaccounted nonoverlapping files against directory moves
     for f in u1 + u2:
         if f not in fullcopy:
             for d in dirmove:
                 if f.startswith(d):
                     # new file added in a directory that was moved, move it
                     df = dirmove[d] + f[len(d):]
                     if df not in copy:
                         copy[f] = df
                         repo.ui.debug("   file %s -> %s\n" % (f, copy[f]))
                     break

     return copy, diverge
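
The copies.py hunk leans on _findlimit(), whose "basic idea" comment block describes a heap-driven walk that marks each head with a side and clears the side of any ancestor reachable from both. The sketch below is a simplified, self-contained model of that walk, assuming a plain {rev: (p1, p2)} parent map in place of repo.changelog and ignoring the working-directory pseudo rev; the function and variable names are mine, not Mercurial's.

# Simplified model of the _findlimit() walk (not Mercurial code): a plain
# parent map stands in for repo.changelog, and there is no working-dir
# pseudo rev.  Returns the earliest rev that is an ancestor of a or b but
# not of both, or None if the two heads share no ancestor.
import heapq

def findlimit(parents, a, b):
    side = {a: -1, b: 1}          # mark the two heads with different sides
    visit = [-a, -b]              # negate revs so the heap pops highest first
    heapq.heapify(visit)
    interesting = len(visit)
    hascommonancestor = False
    limit = max(a, b)             # stands in for the 'working' upper bound

    while interesting:
        r = -heapq.heappop(visit)
        for p in parents[r]:
            if p < 0:
                continue
            if p not in side:
                side[p] = side[r]             # first time we see p
                if side[p]:
                    interesting += 1
                heapq.heappush(visit, -p)
            elif side[p] and side[p] != side[r]:
                side[p] = 0                   # reachable from both: no side
                interesting -= 1
                hascommonancestor = True
        if side[r]:
            limit = r                         # lowest one-sided rev so far
            interesting -= 1

    if not hascommonancestor:
        return None
    return limit

# Toy history: two branches off rev 0, heads at rev 4 and rev 2.
#
#   0 --- 3 --- 4
#    \
#     1 --- 2
#
parents = {0: (-1, -1), 1: (0, -1), 2: (1, -1), 3: (0, -1), 4: (3, -1)}
print findlimit(parents, 4, 2)    # -> 1: an ancestor of rev 2 only, so copy
                                  #    search must reach back at least that far
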
--- a/tests/test-hgrc
+++ b/tests/test-hgrc
@@ -1,23 +1,27 @@
 #!/bin/sh

 echo "invalid" > $HGRCPATH
 hg version 2>&1 | sed -e "s|$HGRCPATH|\$HGRCPATH|"
 echo "" > $HGRCPATH

 # issue1199: escaping
 hg init "foo%bar"
 hg clone "foo%bar" foobar
 p=`pwd`
 cd foobar
 cat .hg/hgrc | sed -e "s:$p:...:"
 hg paths | sed -e "s:$p:...:"
 hg showconfig | sed -e "s:$p:...:"
 cd ..

 # issue1829: wrong indentation
 echo '[foo]' > $HGRCPATH
 echo ' x = y' >> $HGRCPATH
 hg version 2>&1 | sed -e "s|$HGRCPATH|\$HGRCPATH|"

+python -c "print '[foo]\nbar = a\n b\n c \n de\n fg \nbaz = bif cb \n'" \
+    > $HGRCPATH
+hg showconfig foo
+
 echo '%include /no-such-file' > $HGRCPATH
 hg version 2>&1 | sed -e "s|$HGRCPATH|\$HGRCPATH|"
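
To connect the new test case to the expected output that follows, here is an illustrative sketch (mine, not Mercurial code) of the joining rule config.parse() applies: each matching continuation line is appended to the stored value as "\n" plus the captured text, which is why the output file below expects foo.bar=a\nb\nc\nde\nfg and foo.baz=bif cb.

# Illustrative sketch (not Mercurial code): apply the item and continuation
# patterns from config.parse() to the exact string the new test writes into
# $HGRCPATH, using the same joining rule (old value + "\n" + new piece).
# The [foo] section header is simply skipped here for brevity.
import re

itemre = re.compile(r'([^=\s][^=]*?)\s*=\s*(.*\S|)')
contre = re.compile(r'\s+(\S|\S.*\S)\s*$')    # the new continuation pattern

data = '[foo]\nbar = a\n b\n c \n de\n fg \nbaz = bif cb \n'
values = {}
item = None
for line in data.splitlines(True):
    if item:
        m = contre.match(line)
        if m:
            values[item] += "\n" + m.group(1)
            continue
        item = None
    m = itemre.match(line)
    if m:
        item = m.group(1)
        values[item] = m.group(2)

print 'foo.bar=%s' % values['bar'].replace('\n', '\\n')
print 'foo.baz=%s' % values['baz']
# foo.bar=a\nb\nc\nde\nfg
# foo.baz=bif cb
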
--- a/tests/test-hgrc.out
+++ b/tests/test-hgrc.out
@@ -1,10 +1,12 @@
 hg: config error at $HGRCPATH:1: 'invalid'
 updating to branch default
 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
 [paths]
 default = .../foo%bar
 default = .../foo%bar
 bundle.mainreporoot=.../foobar
 paths.default=.../foo%bar
 hg: config error at $HGRCPATH:2: ' x = y'
+foo.bar=a\nb\nc\nde\nfg
+foo.baz=bif cb
 hg: config error at $HGRCPATH:1: cannot include /no-such-file (No such file or directory)