##// END OF EJS Templates
cleanup: use x in (a, b) instead of x == a or x == b
Brodie Rao -
r12387:4f8067c9 default
parent child Browse files
Show More
@@ -1,203 +1,203 b''
1 1 # Perforce source for convert extension.
2 2 #
3 3 # Copyright 2009, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial import util
9 9 from mercurial.i18n import _
10 10
11 11 from common import commit, converter_source, checktool, NoRepo
12 12 import marshal
13 13 import re
14 14
def loaditer(f):
    """Yield the dictionary objects marshalled to *f* by 'p4 -G'.

    Iteration stops at the first falsy object or at end of file.
    """
    try:
        while True:
            obj = marshal.load(f)
            if not obj:
                return
            yield obj
    except EOFError:
        pass
25 25
class p4_source(converter_source):
    """Perforce source for the convert extension.

    Talks to p4 exclusively through 'p4 -G' (marshalled dictionary
    output) and exposes changelists through the converter_source API.
    """

    def __init__(self, ui, path, rev=None):
        super(p4_source, self).__init__(ui, path, rev=rev)

        # depot paths start with '//'; any other path containing '/'
        # cannot be a P4 spec
        if "/" in path and not path.startswith('//'):
            raise NoRepo(_('%s does not look like a P4 repository') % path)

        checktool('p4', abort=False)

        self.p4changes = {}
        self.heads = {}
        self.changeset = {}
        self.files = {}
        self.tags = {}
        self.lastbranch = {}
        self.parent = {}
        self.encoding = "latin_1"
        self.depotname = {}  # mapping from local name to depot name

        # p4 file type: optional modifier, base type, optional '+' flags
        self.re_type = re.compile(
            "([a-z]+)?(text|binary|symlink|apple|resource|unicode|utf\d+)"
            "(\+\w+)?$")
        # RCS-style keywords expanded by p4 'k' filetypes
        self.re_keywords = re.compile(
            r"\$(Id|Header|Date|DateTime|Change|File|Revision|Author)"
            r":[^$\n]*\$")
        self.re_keywords_old = re.compile("\$(Id|Header):[^$\n]*\$")

        self._parse(ui, path)

    def _parse_view(self, path):
        "Read changes affecting the path"
        cmd = 'p4 -G changes -s submitted %s' % util.shellquote(path)
        stdout = util.popen(cmd, mode='rb')
        for pdict in loaditer(stdout):
            changeno = pdict.get("change", None)
            if changeno:
                self.p4changes[changeno] = True

    def _parse(self, ui, path):
        "Prepare list of P4 filenames and revisions to import"
        ui.status(_('reading p4 views\n'))

        # read client spec or view
        if "/" in path:
            self._parse_view(path)
            if path.startswith("//") and path.endswith("/..."):
                views = {path[:-3]: ""}
            else:
                views = {"//": ""}
        else:
            cmd = 'p4 -G client -o %s' % util.shellquote(path)
            clientspec = marshal.load(util.popen(cmd, mode='rb'))

            views = {}
            for client in clientspec:
                if client.startswith("View"):
                    sview, cview = clientspec[client].split()
                    self._parse_view(sview)
                    if sview.endswith("...") and cview.endswith("..."):
                        sview = sview[:-3]
                        cview = cview[:-3]
                    # strip the '//client/' prefix from the client side
                    cview = cview[2:]
                    cview = cview[cview.find("/") + 1:]
                    views[sview] = cview

        # list of changes that affect our source files
        self.p4changes = self.p4changes.keys()
        self.p4changes.sort(key=int)

        # list with depot pathnames, longest first so the most specific
        # view mapping wins
        vieworder = views.keys()
        vieworder.sort(key=len, reverse=True)

        # handle revision limiting
        startrev = self.ui.config('convert', 'p4.startrev', default=0)
        self.p4changes = [x for x in self.p4changes
                          if ((not startrev or int(x) >= int(startrev)) and
                              (not self.rev or int(x) <= int(self.rev)))]

        # now read the full changelists to get the list of file revisions
        ui.status(_('collecting p4 changelists\n'))
        lastid = None
        for change in self.p4changes:
            cmd = "p4 -G describe %s" % change
            stdout = util.popen(cmd, mode='rb')
            d = marshal.load(stdout)

            desc = self.recode(d["desc"])
            shortdesc = desc.split("\n", 1)[0]
            t = '%s %s' % (d["change"], repr(shortdesc)[1:-1])
            ui.status(util.ellipsis(t, 80) + '\n')

            if lastid:
                parents = [lastid]
            else:
                parents = []

            date = (int(d["time"]), 0)  # timezone not set
            c = commit(author=self.recode(d["user"]), date=util.datestr(date),
                       parents=parents, desc=desc, branch='',
                       extra={"p4": change})

            files = []
            i = 0
            # describe lists files as depotFile0/rev0, depotFile1/rev1, ...
            while ("depotFile%d" % i) in d and ("rev%d" % i) in d:
                oldname = d["depotFile%d" % i]
                filename = None
                for v in vieworder:
                    if oldname.startswith(v):
                        filename = views[v] + oldname[len(v):]
                        break
                if filename:
                    files.append((filename, d["rev%d" % i]))
                    self.depotname[filename] = oldname
                i += 1
            self.changeset[change] = c
            self.files[change] = files
            lastid = change

        if lastid:
            self.heads = [lastid]

    def getheads(self):
        return self.heads

    def getfile(self, name, rev):
        cmd = 'p4 -G print %s' \
              % util.shellquote("%s#%s" % (self.depotname[name], rev))
        stdout = util.popen(cmd, mode='rb')

        mode = None
        contents = ""
        keywords = None

        for d in loaditer(stdout):
            code = d["code"]
            data = d.get("data")

            if code == "error":
                raise IOError(d["generic"], data)
            elif code == "stat":
                p4type = self.re_type.match(d["type"])
                if p4type:
                    mode = ""
                    flags = (p4type.group(1) or "") + (p4type.group(3) or "")
                    if "x" in flags:
                        mode = "x"
                    if p4type.group(2) == "symlink":
                        mode = "l"
                    # 'ko' filetypes expand only the old-style keywords
                    if "ko" in flags:
                        keywords = self.re_keywords_old
                    elif "k" in flags:
                        keywords = self.re_keywords
            elif code in ("text", "binary"):
                contents += data

        if mode is None:
            raise IOError(0, "bad stat")

        if keywords:
            # collapse expanded keywords back to their bare form
            contents = keywords.sub("$\\1$", contents)
        if mode == "l" and contents.endswith("\n"):
            contents = contents[:-1]

        return contents, mode

    def getchanges(self, rev):
        return self.files[rev], {}

    def getcommit(self, rev):
        return self.changeset[rev]

    def gettags(self):
        return self.tags

    def getchangedfiles(self, rev, i):
        return sorted([x[0] for x in self.files[rev]])
@@ -1,88 +1,88 b''
1 1 # ancestor.py - generic DAG ancestor algorithm for mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import heapq
9 9
def ancestor(a, b, pfunc):
    """
    Return a minimal-distance common ancestor of nodes a and b, or None
    if there is no such ancestor.  Several ancestors can share the
    minimal distance; the one returned is arbitrary.

    pfunc must return a list of parent vertices for a given vertex.
    """

    if a == b:
        return a

    a, b = sorted([a, b])

    # find depth from root of all ancestors
    # (depths are <= 0: roots get 0, children one less than their
    # shallowest parent)
    parentcache = {}
    visit = [a, b]
    depth = {}
    while visit:
        vertex = visit[-1]
        pl = pfunc(vertex)
        parentcache[vertex] = pl
        if not pl:
            depth[vertex] = 0
            visit.pop()
        else:
            for p in pl:
                if p in (a, b): # did we find a or b as a parent?
                    return p    # we're done
                if p not in depth:
                    visit.append(p)
            if visit[-1] == vertex:
                # all parents already have depths: compute ours
                depth[vertex] = min([depth[p] for p in pl]) - 1
                visit.pop()

    # traverse ancestors in order of decreasing distance from root
    def ancestors(vertex):
        h = [(depth[vertex], vertex)]
        seen = set()
        while h:
            d, n = heapq.heappop(h)
            if n not in seen:
                seen.add(n)
                yield (d, n)
                for p in parentcache[n]:
                    heapq.heappush(h, (depth[p], p))

    def generations(vertex):
        # group ancestors into sets of equal depth
        sg, s = None, set()
        for g, v in ancestors(vertex):
            if g != sg:
                if sg:
                    yield sg, s
                sg, s = g, set((v,))
            else:
                s.add(v)
        yield sg, s

    x = generations(a)
    y = generations(b)
    # use the builtin next() (py2.6+/py3) rather than the py2-only
    # .next() method
    gx = next(x)
    gy = next(y)

    # increment each ancestor list until it is closer to root than
    # the other, or they match
    try:
        while 1:
            if gx[0] == gy[0]:
                for v in gx[1]:
                    if v in gy[1]:
                        return v
                gy = next(y)
                gx = next(x)
            elif gx[0] > gy[0]:
                gy = next(y)
            else:
                gx = next(x)
    except StopIteration:
        return None
@@ -1,474 +1,474 b''
1 1 # dagparser.py - parser and generator for concise description of DAGs
2 2 #
3 3 # Copyright 2010 Peter Arrenbrecht <peter@arrenbrecht.ch>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re, string
9 9 import util
10 10 from i18n import _
11 11
def parsedag(desc):
    '''parses a DAG from a concise textual description; generates events

    Grammar:
      "+n"             a linear run of n nodes based on the current
                       default parent
      "."              a single node based on the current default parent
      "$"              reset the default parent to -1 (implied at start)
      "<p"             set the default parent to the backref p
      "*p"             a fork at parent p, where p is a backref
      "*p1/p2/.../pn"  a merge of parents p1..pn (backrefs)
      "/p2/.../pn"     a merge of the preceding node and p2..pn
      ":name"          label the preceding node (labels can be redefined)
      "@text"          emit an annotation event for text
      "!command"       emit an action event for the current node
      "!!my command\\n" like "!", but to the end of the line
      "#...\\n"         comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either a number n (referencing the node curr-n), the
    name of a label placed earlier using ":name", or empty to denote
    the default parent.  All string-valued elements are either strictly
    alphanumeric, or enclosed in double quotes ("..."), with "\\" as
    escape character.

    Generates a sequence of

      ('n', (id, [parentids])) for node creation
      ('l', (id, labelname))   for labels on nodes
      ('a', text)              for annotations
      ('c', command)           for actions (!)
      ('C', command)           for line actions (!!)

    >>> list(parsedag("+2 *2 */2"))  # fork and join
    [('n', (0, [-1])), ('n', (1, [0])), ('n', (2, [0])), ('n', (3, [2, 1]))]
    '''
    if not desc:
        return

    wordchars = string.ascii_letters + string.digits

    labels = {}
    p1 = -1
    r = 0

    def resolve(ref):
        # backref -> node id: empty means the default parent, digits a
        # relative reference, anything else a label
        if not ref:
            return p1
        elif ref[0] in string.digits:
            return r - int(ref)
        else:
            return labels[ref]

    chiter = iter(desc)

    def nextch():
        # '\0' is the end-of-input sentinel; next() with a default
        # replaces the py2-only chiter.next() + try/except EOF dance
        return next(chiter, '\0')

    def nextrun(c, allow):
        s = ''
        while c in allow:
            s += c
            c = nextch()
        return c, s

    def nextdelimited(c, limit, escape):
        s = ''
        while c != limit:
            if c == escape:
                c = nextch()
            s += c
            c = nextch()
        return nextch(), s

    def nextstring(c):
        if c == '"':
            return nextdelimited(nextch(), '"', '\\')
        else:
            return nextrun(c, wordchars)

    c = nextch()
    while c != '\0':
        while c in string.whitespace:
            c = nextch()
        if c == '.':
            yield 'n', (r, [p1])
            p1 = r
            r += 1
            c = nextch()
        elif c == '+':
            c, digs = nextrun(nextch(), string.digits)
            n = int(digs)
            for _ in range(n):
                yield 'n', (r, [p1])
                p1 = r
                r += 1
        elif c in '*/':
            # '*' starts an explicit parent list; a bare '/' merges
            # with the preceding node (empty first backref)
            if c == '*':
                c = nextch()
            c, pref = nextstring(c)
            prefs = [pref]
            while c == '/':
                c, pref = nextstring(nextch())
                prefs.append(pref)
            ps = [resolve(ref) for ref in prefs]
            yield 'n', (r, ps)
            p1 = r
            r += 1
        elif c == '<':
            c, ref = nextstring(nextch())
            p1 = resolve(ref)
        elif c == ':':
            c, name = nextstring(nextch())
            labels[name] = p1
            yield 'l', (p1, name)
        elif c == '@':
            c, text = nextstring(nextch())
            yield 'a', text
        elif c == '!':
            c = nextch()
            if c == '!':
                # "!!" command runs to the end of the line
                cmd = ''
                c = nextch()
                while c not in '\n\r\0':
                    cmd += c
                    c = nextch()
                yield 'C', cmd
            else:
                c, cmd = nextstring(c)
                yield 'c', cmd
        elif c == '#':
            while c not in '\n\r\0':
                c = nextch()
        elif c == '$':
            p1 = -1
            c = nextch()
        elif c == '\0':
            return # in case it was preceded by whitespace
        else:
            # collect up to 10 characters of context for the error
            s = ''
            i = 0
            while c != '\0' and i < 10:
                s += c
                i += 1
                c = nextch()
            raise util.Abort(_("invalid character in dag description: %s...")
                             % s)
272 272
def dagtextlines(events,
                 addspaces=True,
                 wraplabels=False,
                 wrapannotations=False,
                 wrapcommands=False,
                 wrapnonlinear=False,
                 usedots=False,
                 maxlinewidth=70):
    '''generates single lines for dagtext()'''

    def wrapstring(text):
        # purely lowercase-alphanumeric strings need no quoting
        if re.match("^[0-9a-z]*$", text):
            return text
        # escape backslashes, then embedded quotes, so parsedag's
        # nextdelimited() can unescape them again.  (The original code
        # used .replace('"', '\"'), which is a no-op since '\"' == '"',
        # leaving embedded quotes unescaped and unparsable.)
        return '"' + text.replace('\\', '\\\\').replace('"', '\\"') + '"'

    def gen():
        labels = {}
        run = 0            # length of the current linear run of nodes
        wantr = 0          # next expected node id
        needroot = False   # True once the first root has been seen
        for kind, data in events:
            if kind == 'n':
                r, ps = data

                # sanity check
                if r != wantr:
                    raise util.Abort(_("expected id %i, got %i") % (wantr, r))
                if not ps:
                    ps = [-1]
                else:
                    for p in ps:
                        if p >= r:
                            raise util.Abort(_("parent id %i is larger than "
                                               "current id %i") % (p, r))
                wantr += 1

                # new root?
                p1 = r - 1
                if len(ps) == 1 and ps[0] == -1:
                    if needroot:
                        # flush the pending run before restarting at '$'
                        if run:
                            yield '+' + str(run)
                            run = 0
                        if wrapnonlinear:
                            yield '\n'
                        yield '$'
                        p1 = -1
                    else:
                        needroot = True
                if len(ps) == 1 and ps[0] == p1:
                    # linear continuation of the previous node
                    if usedots:
                        yield "."
                    else:
                        run += 1
                else:
                    if run:
                        yield '+' + str(run)
                        run = 0
                    if wrapnonlinear:
                        yield '\n'
                    prefs = []
                    for p in ps:
                        if p == p1:
                            prefs.append('')
                        elif p in labels:
                            prefs.append(labels[p])
                        else:
                            prefs.append(str(r - p))
                    yield '*' + '/'.join(prefs)
            else:
                if run:
                    yield '+' + str(run)
                    run = 0
                if kind == 'l':
                    rid, name = data
                    labels[rid] = name
                    yield ':' + name
                    if wraplabels:
                        yield '\n'
                elif kind == 'c':
                    yield '!' + wrapstring(data)
                    if wrapcommands:
                        yield '\n'
                elif kind == 'C':
                    yield '!!' + data
                    yield '\n'
                elif kind == 'a':
                    if wrapannotations:
                        yield '\n'
                    yield '@' + wrapstring(data)
                elif kind == '#':
                    yield '#' + data
                    yield '\n'
                else:
                    # report the offending event kind (the original code
                    # mistakenly formatted the builtin 'type' here)
                    raise util.Abort(_("invalid event type in dag: %s")
                                     % str((kind, data)))
        if run:
            yield '+' + str(run)

    # assemble the generated parts into lines of at most maxlinewidth
    line = ''
    for part in gen():
        if part == '\n':
            if line:
                yield line
                line = ''
        else:
            if len(line) + len(part) >= maxlinewidth:
                yield line
                line = ''
            elif addspaces and line and part != '.':
                line += ' '
            line += part
    if line:
        yield line
387 387
def dagtext(dag,
            addspaces=True,
            wraplabels=False,
            wrapannotations=False,
            wrapcommands=False,
            wrapnonlinear=False,
            usedots=False,
            maxlinewidth=70):
    '''Render a dag event stream back into its concise textual form.

    dag should generate what parsedag() does:

      ('n', (id, [parentids])) for node creation
      ('l', (id, labelname))   for labels on nodes
      ('a', text)              for annotations
      ('c', text)              for commands
      ('C', text)              for line commands ('!!')
      ('#', text)              for comment lines

    Parent nodes must come before child nodes.

    >>> dagtext([('n', (0, [-1])), ('n', (1, [0]))])
    '+2'

    >>> dagtext(parsedag('+1 :f +1 :p2 *f */p2'))
    '+1 :f +1 :p2 *f */p2'
    '''
    lines = dagtextlines(dag,
                         addspaces,
                         wraplabels,
                         wrapannotations,
                         wrapcommands,
                         wrapnonlinear,
                         usedots,
                         maxlinewidth)
    return "\n".join(lines)
@@ -1,680 +1,680 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid
9 9 from i18n import _
10 10 import util, ignore, osutil, parsers
11 11 import struct, os, stat, errno
12 12 import cStringIO
13 13
14 14 _format = ">cllll"
15 15 propertycache = util.propertycache
16 16
17 17 def _finddirs(path):
18 18 pos = path.rfind('/')
19 19 while pos != -1:
20 20 yield path[:pos]
21 21 pos = path.rfind('/', 0, pos)
22 22
def _incdirs(dirs, path):
    # Bump the tracked-file count of each ancestor of path.  As soon as
    # one ancestor is already present, all shallower ones must be too,
    # so we can stop early.
    for base in _finddirs(path):
        if base in dirs:
            dirs[base] += 1
            return
        dirs[base] = 1
29 29
def _decdirs(dirs, path):
    # Drop one tracked file from each ancestor of path; delete counters
    # that reach zero.  A still-positive ancestor implies all shallower
    # ancestors stay positive too, so stop there.
    for base in _finddirs(path):
        count = dirs[base]
        if count > 1:
            dirs[base] = count - 1
            return
        del dirs[base]
36 36
37 37 class dirstate(object):
38 38
39 39 def __init__(self, opener, ui, root):
40 40 '''Create a new dirstate object.
41 41
42 42 opener is an open()-like callable that can be used to open the
43 43 dirstate file; root is the root of the directory tracked by
44 44 the dirstate.
45 45 '''
46 46 self._opener = opener
47 47 self._root = root
48 48 self._rootdir = os.path.join(root, '')
49 49 self._dirty = False
50 50 self._dirtypl = False
51 51 self._ui = ui
52 52
53 53 @propertycache
54 54 def _map(self):
55 55 '''Return the dirstate contents as a map from filename to
56 56 (state, mode, size, time).'''
57 57 self._read()
58 58 return self._map
59 59
60 60 @propertycache
61 61 def _copymap(self):
62 62 self._read()
63 63 return self._copymap
64 64
65 65 @propertycache
66 66 def _foldmap(self):
67 67 f = {}
68 68 for name in self._map:
69 69 f[os.path.normcase(name)] = name
70 70 return f
71 71
72 72 @propertycache
73 73 def _branch(self):
74 74 try:
75 75 return self._opener("branch").read().strip() or "default"
76 76 except IOError:
77 77 return "default"
78 78
79 79 @propertycache
80 80 def _pl(self):
81 81 try:
82 82 st = self._opener("dirstate").read(40)
83 83 l = len(st)
84 84 if l == 40:
85 85 return st[:20], st[20:40]
86 86 elif l > 0 and l < 40:
87 87 raise util.Abort(_('working directory state appears damaged!'))
88 88 except IOError, err:
89 89 if err.errno != errno.ENOENT:
90 90 raise
91 91 return [nullid, nullid]
92 92
93 93 @propertycache
94 94 def _dirs(self):
95 95 dirs = {}
96 96 for f, s in self._map.iteritems():
97 97 if s[0] != 'r':
98 98 _incdirs(dirs, f)
99 99 return dirs
100 100
101 101 @propertycache
102 102 def _ignore(self):
103 103 files = [self._join('.hgignore')]
104 104 for name, path in self._ui.configitems("ui"):
105 105 if name == 'ignore' or name.startswith('ignore.'):
106 106 files.append(util.expandpath(path))
107 107 return ignore.ignore(self._root, files, self._ui.warn)
108 108
109 109 @propertycache
110 110 def _slash(self):
111 111 return self._ui.configbool('ui', 'slash') and os.sep != '/'
112 112
113 113 @propertycache
114 114 def _checklink(self):
115 115 return util.checklink(self._root)
116 116
117 117 @propertycache
118 118 def _checkexec(self):
119 119 return util.checkexec(self._root)
120 120
121 121 @propertycache
122 122 def _checkcase(self):
123 123 return not util.checkcase(self._join('.hg'))
124 124
125 125 def _join(self, f):
126 126 # much faster than os.path.join()
127 127 # it's safe because f is always a relative path
128 128 return self._rootdir + f
129 129
130 130 def flagfunc(self, fallback):
131 131 if self._checklink:
132 132 if self._checkexec:
133 133 def f(x):
134 134 p = self._join(x)
135 135 if os.path.islink(p):
136 136 return 'l'
137 137 if util.is_exec(p):
138 138 return 'x'
139 139 return ''
140 140 return f
141 141 def f(x):
142 142 if os.path.islink(self._join(x)):
143 143 return 'l'
144 144 if 'x' in fallback(x):
145 145 return 'x'
146 146 return ''
147 147 return f
148 148 if self._checkexec:
149 149 def f(x):
150 150 if 'l' in fallback(x):
151 151 return 'l'
152 152 if util.is_exec(self._join(x)):
153 153 return 'x'
154 154 return ''
155 155 return f
156 156 return fallback
157 157
158 158 def getcwd(self):
159 159 cwd = os.getcwd()
160 160 if cwd == self._root:
161 161 return ''
162 162 # self._root ends with a path separator if self._root is '/' or 'C:\'
163 163 rootsep = self._root
164 164 if not util.endswithsep(rootsep):
165 165 rootsep += os.sep
166 166 if cwd.startswith(rootsep):
167 167 return cwd[len(rootsep):]
168 168 else:
169 169 # we're outside the repo. return an absolute path.
170 170 return cwd
171 171
172 172 def pathto(self, f, cwd=None):
173 173 if cwd is None:
174 174 cwd = self.getcwd()
175 175 path = util.pathto(self._root, cwd, f)
176 176 if self._slash:
177 177 return util.normpath(path)
178 178 return path
179 179
180 180 def __getitem__(self, key):
181 181 '''Return the current state of key (a filename) in the dirstate.
182 182
183 183 States are:
184 184 n normal
185 185 m needs merging
186 186 r marked for removal
187 187 a marked for addition
188 188 ? not tracked
189 189 '''
190 190 return self._map.get(key, ("?",))[0]
191 191
192 192 def __contains__(self, key):
193 193 return key in self._map
194 194
195 195 def __iter__(self):
196 196 for x in sorted(self._map):
197 197 yield x
198 198
199 199 def parents(self):
200 200 return self._pl
201 201
202 202 def branch(self):
203 203 return self._branch
204 204
205 205 def setparents(self, p1, p2=nullid):
206 206 self._dirty = self._dirtypl = True
207 207 self._pl = p1, p2
208 208
209 209 def setbranch(self, branch):
210 210 if branch in ['tip', '.', 'null']:
211 211 raise util.Abort(_('the name \'%s\' is reserved') % branch)
212 212 self._branch = branch
213 213 self._opener("branch", "w").write(branch + '\n')
214 214
215 215 def _read(self):
216 216 self._map = {}
217 217 self._copymap = {}
218 218 try:
219 219 st = self._opener("dirstate").read()
220 220 except IOError, err:
221 221 if err.errno != errno.ENOENT:
222 222 raise
223 223 return
224 224 if not st:
225 225 return
226 226
227 227 p = parsers.parse_dirstate(self._map, self._copymap, st)
228 228 if not self._dirtypl:
229 229 self._pl = p
230 230
231 231 def invalidate(self):
232 232 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
233 233 if a in self.__dict__:
234 234 delattr(self, a)
235 235 self._dirty = False
236 236
237 237 def copy(self, source, dest):
238 238 """Mark dest as a copy of source. Unmark dest if source is None."""
239 239 if source == dest:
240 240 return
241 241 self._dirty = True
242 242 if source is not None:
243 243 self._copymap[dest] = source
244 244 elif dest in self._copymap:
245 245 del self._copymap[dest]
246 246
247 247 def copied(self, file):
248 248 return self._copymap.get(file, None)
249 249
250 250 def copies(self):
251 251 return self._copymap
252 252
253 253 def _droppath(self, f):
254 254 if self[f] not in "?r" and "_dirs" in self.__dict__:
255 255 _decdirs(self._dirs, f)
256 256
257 257 def _addpath(self, f, check=False):
258 258 oldstate = self[f]
259 259 if check or oldstate == "r":
260 260 if '\r' in f or '\n' in f:
261 261 raise util.Abort(
262 262 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
263 263 if f in self._dirs:
264 264 raise util.Abort(_('directory %r already in dirstate') % f)
265 265 # shadows
266 266 for d in _finddirs(f):
267 267 if d in self._dirs:
268 268 break
269 269 if d in self._map and self[d] != 'r':
270 270 raise util.Abort(
271 271 _('file %r in dirstate clashes with %r') % (d, f))
272 272 if oldstate in "?r" and "_dirs" in self.__dict__:
273 273 _incdirs(self._dirs, f)
274 274
275 275 def normal(self, f):
276 276 '''Mark a file normal and clean.'''
277 277 self._dirty = True
278 278 self._addpath(f)
279 279 s = os.lstat(self._join(f))
280 280 self._map[f] = ('n', s.st_mode, s.st_size, int(s.st_mtime))
281 281 if f in self._copymap:
282 282 del self._copymap[f]
283 283
284 284 def normallookup(self, f):
285 285 '''Mark a file normal, but possibly dirty.'''
286 286 if self._pl[1] != nullid and f in self._map:
287 287 # if there is a merge going on and the file was either
288 288 # in state 'm' (-1) or coming from other parent (-2) before
289 289 # being removed, restore that state.
290 290 entry = self._map[f]
291 291 if entry[0] == 'r' and entry[2] in (-1, -2):
292 292 source = self._copymap.get(f)
293 293 if entry[2] == -1:
294 294 self.merge(f)
295 295 elif entry[2] == -2:
296 296 self.otherparent(f)
297 297 if source:
298 298 self.copy(source, f)
299 299 return
300 300 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
301 301 return
302 302 self._dirty = True
303 303 self._addpath(f)
304 304 self._map[f] = ('n', 0, -1, -1)
305 305 if f in self._copymap:
306 306 del self._copymap[f]
307 307
308 308 def otherparent(self, f):
309 309 '''Mark as coming from the other parent, always dirty.'''
310 310 if self._pl[1] == nullid:
311 311 raise util.Abort(_("setting %r to other parent "
312 312 "only allowed in merges") % f)
313 313 self._dirty = True
314 314 self._addpath(f)
315 315 self._map[f] = ('n', 0, -2, -1)
316 316 if f in self._copymap:
317 317 del self._copymap[f]
318 318
319 319 def add(self, f):
320 320 '''Mark a file added.'''
321 321 self._dirty = True
322 322 self._addpath(f, True)
323 323 self._map[f] = ('a', 0, -1, -1)
324 324 if f in self._copymap:
325 325 del self._copymap[f]
326 326
327 327 def remove(self, f):
328 328 '''Mark a file removed.'''
329 329 self._dirty = True
330 330 self._droppath(f)
331 331 size = 0
332 332 if self._pl[1] != nullid and f in self._map:
333 333 # backup the previous state
334 334 entry = self._map[f]
335 335 if entry[0] == 'm': # merge
336 336 size = -1
337 337 elif entry[0] == 'n' and entry[2] == -2: # other parent
338 338 size = -2
339 339 self._map[f] = ('r', 0, size, 0)
340 340 if size == 0 and f in self._copymap:
341 341 del self._copymap[f]
342 342
343 343 def merge(self, f):
344 344 '''Mark a file merged.'''
345 345 self._dirty = True
346 346 s = os.lstat(self._join(f))
347 347 self._addpath(f)
348 348 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
349 349 if f in self._copymap:
350 350 del self._copymap[f]
351 351
    def forget(self, f):
        '''Forget a file.

        Drops f from the dirstate entirely (unlike remove(), which
        records it as state 'r').  Warns instead of raising when f is
        not tracked; the KeyError may come from either _droppath or the
        map deletion.
        '''
        self._dirty = True
        try:
            self._droppath(f)
            del self._map[f]
        except KeyError:
            self._ui.warn(_("not in dirstate: %s\n") % f)
360 360
361 361 def _normalize(self, path, knownpath):
362 362 norm_path = os.path.normcase(path)
363 363 fold_path = self._foldmap.get(norm_path, None)
364 364 if fold_path is None:
365 365 if knownpath or not os.path.lexists(os.path.join(self._root, path)):
366 366 fold_path = path
367 367 else:
368 368 fold_path = self._foldmap.setdefault(norm_path,
369 369 util.fspath(path, self._root))
370 370 return fold_path
371 371
372 372 def clear(self):
373 373 self._map = {}
374 374 if "_dirs" in self.__dict__:
375 375 delattr(self, "_dirs")
376 376 self._copymap = {}
377 377 self._pl = [nullid, nullid]
378 378 self._dirty = True
379 379
380 380 def rebuild(self, parent, files):
381 381 self.clear()
382 382 for f in files:
383 383 if 'x' in files.flags(f):
384 384 self._map[f] = ('n', 0777, -1, 0)
385 385 else:
386 386 self._map[f] = ('n', 0666, -1, 0)
387 387 self._pl = (parent, nullid)
388 388 self._dirty = True
389 389
    def write(self):
        '''Serialize the dirstate to disk atomically; no-op when clean.

        Output layout: the two parent nodes concatenated, then for each
        tracked file a struct-packed record (state, mode, size, mtime,
        name length) followed by the name; copied files carry
        "dest\\0source" as their name.
        '''
        if not self._dirty:
            return
        st = self._opener("dirstate", "w", atomictemp=True)

        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = int(util.fstat(st).st_mtime)

        # buffer everything, then hand one string to the atomic writer
        cs = cStringIO.StringIO()
        copymap = self._copymap
        pack = struct.pack
        write = cs.write
        write("".join(self._pl))
        for f, e in self._map.iteritems():
            if e[0] == 'n' and e[3] == now:
                # The file was last modified "simultaneously" with the current
                # write to dirstate (i.e. within the same second for file-
                # systems with a granularity of 1 sec). This commonly happens
                # for at least a couple of files on 'update'.
                # The user could change the file without changing its size
                # within the same second. Invalidate the file's stat data in
                # dirstate, forcing future 'status' calls to compare the
                # contents of the file. This prevents mistakenly treating such
                # files as clean.
                e = (e[0], 0, -1, -1) # mark entry as 'unset'
                self._map[f] = e

            if f in copymap:
                # encode copy source into the name field
                f = "%s\0%s" % (f, copymap[f])
            # _format is a module-level struct format -- presumably the
            # fixed dirstate record layout; defined outside this view
            e = pack(_format, e[0], e[1], e[2], e[3], len(f))
            write(e)
            write(f)
        st.write(cs.getvalue())
        st.rename()
        self._dirty = self._dirtypl = False
426 426
427 427 def _dirignore(self, f):
428 428 if f == '.':
429 429 return False
430 430 if self._ignore(f):
431 431 return True
432 432 for p in _finddirs(f):
433 433 if self._ignore(p):
434 434 return True
435 435 return False
436 436
437 437 def walk(self, match, subrepos, unknown, ignored):
438 438 '''
439 439 Walk recursively through the directory tree, finding all files
440 440 matched by match.
441 441
442 442 Return a dict mapping filename to stat-like object (either
443 443 mercurial.osutil.stat instance or return value of os.stat()).
444 444 '''
445 445
446 446 def fwarn(f, msg):
447 447 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
448 448 return False
449 449
450 450 def badtype(mode):
451 451 kind = _('unknown')
452 452 if stat.S_ISCHR(mode):
453 453 kind = _('character device')
454 454 elif stat.S_ISBLK(mode):
455 455 kind = _('block device')
456 456 elif stat.S_ISFIFO(mode):
457 457 kind = _('fifo')
458 458 elif stat.S_ISSOCK(mode):
459 459 kind = _('socket')
460 460 elif stat.S_ISDIR(mode):
461 461 kind = _('directory')
462 462 return _('unsupported file type (type is %s)') % kind
463 463
464 464 ignore = self._ignore
465 465 dirignore = self._dirignore
466 466 if ignored:
467 467 ignore = util.never
468 468 dirignore = util.never
469 469 elif not unknown:
470 470 # if unknown and ignored are False, skip step 2
471 471 ignore = util.always
472 472 dirignore = util.always
473 473
474 474 matchfn = match.matchfn
475 475 badfn = match.bad
476 476 dmap = self._map
477 477 normpath = util.normpath
478 478 listdir = osutil.listdir
479 479 lstat = os.lstat
480 480 getkind = stat.S_IFMT
481 481 dirkind = stat.S_IFDIR
482 482 regkind = stat.S_IFREG
483 483 lnkkind = stat.S_IFLNK
484 484 join = self._join
485 485 work = []
486 486 wadd = work.append
487 487
488 488 if self._checkcase:
489 489 normalize = self._normalize
490 490 else:
491 491 normalize = lambda x, y: x
492 492
493 493 exact = skipstep3 = False
494 494 if matchfn == match.exact: # match.exact
495 495 exact = True
496 496 dirignore = util.always # skip step 2
497 497 elif match.files() and not match.anypats(): # match.match, no patterns
498 498 skipstep3 = True
499 499
500 500 files = sorted(match.files())
501 501 subrepos.sort()
502 502 i, j = 0, 0
503 503 while i < len(files) and j < len(subrepos):
504 504 subpath = subrepos[j] + "/"
505 505 if not files[i].startswith(subpath):
506 506 i += 1
507 507 continue
508 508 while files and files[i].startswith(subpath):
509 509 del files[i]
510 510 j += 1
511 511
512 512 if not files or '.' in files:
513 513 files = ['']
514 514 results = dict.fromkeys(subrepos)
515 515 results['.hg'] = None
516 516
517 517 # step 1: find all explicit files
518 518 for ff in files:
519 519 nf = normalize(normpath(ff), False)
520 520 if nf in results:
521 521 continue
522 522
523 523 try:
524 524 st = lstat(join(nf))
525 525 kind = getkind(st.st_mode)
526 526 if kind == dirkind:
527 527 skipstep3 = False
528 528 if nf in dmap:
529 529 #file deleted on disk but still in dirstate
530 530 results[nf] = None
531 531 match.dir(nf)
532 532 if not dirignore(nf):
533 533 wadd(nf)
534 elif kind == regkind or kind == lnkkind:
534 elif kind in (regkind, lnkkind):
535 535 results[nf] = st
536 536 else:
537 537 badfn(ff, badtype(kind))
538 538 if nf in dmap:
539 539 results[nf] = None
540 540 except OSError, inst:
541 541 if nf in dmap: # does it exactly match a file?
542 542 results[nf] = None
543 543 else: # does it match a directory?
544 544 prefix = nf + "/"
545 545 for fn in dmap:
546 546 if fn.startswith(prefix):
547 547 match.dir(nf)
548 548 skipstep3 = False
549 549 break
550 550 else:
551 551 badfn(ff, inst.strerror)
552 552
553 553 # step 2: visit subdirectories
554 554 while work:
555 555 nd = work.pop()
556 556 skip = None
557 557 if nd == '.':
558 558 nd = ''
559 559 else:
560 560 skip = '.hg'
561 561 try:
562 562 entries = listdir(join(nd), stat=True, skip=skip)
563 563 except OSError, inst:
564 564 if inst.errno == errno.EACCES:
565 565 fwarn(nd, inst.strerror)
566 566 continue
567 567 raise
568 568 for f, kind, st in entries:
569 569 nf = normalize(nd and (nd + "/" + f) or f, True)
570 570 if nf not in results:
571 571 if kind == dirkind:
572 572 if not ignore(nf):
573 573 match.dir(nf)
574 574 wadd(nf)
575 575 if nf in dmap and matchfn(nf):
576 576 results[nf] = None
577 577 elif kind == regkind or kind == lnkkind:
578 578 if nf in dmap:
579 579 if matchfn(nf):
580 580 results[nf] = st
581 581 elif matchfn(nf) and not ignore(nf):
582 582 results[nf] = st
583 583 elif nf in dmap and matchfn(nf):
584 584 results[nf] = None
585 585
586 586 # step 3: report unseen items in the dmap hash
587 587 if not skipstep3 and not exact:
588 588 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
589 589 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
590 590 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
591 591 st = None
592 592 results[nf] = st
593 593 for s in subrepos:
594 594 del results[s]
595 595 del results['.hg']
596 596 return results
597 597
    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a tuple of lists (unsure, modified, added,
        removed, deleted, unknown, ignored, clean), where:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          added:
            files that have been explicitly added with hg add
          removed:
            files that have been explicitly removed with hg remove
          deleted:
            files that have been deleted through other means ("missing")
          unknown:
            files not in the dirstate that are not ignored
          ignored:
            files not in the dirstate that are ignored
            (by _dirignore())
          clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        # bind bound methods to locals: these appends run once per file
        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append

        lnkkind = stat.S_IFLNK

        for fn, st in self.walk(match, subrepos, listunknown,
                                listignored).iteritems():
            if fn not in dmap:
                # untracked file: classify as ignored or unknown
                if (listignored or match.exact(fn)) and self._dirignore(fn):
                    if listignored:
                        iadd(fn)
                elif listunknown:
                    uadd(fn)
                continue

            state, mode, size, time = dmap[fn]

            # st is None when walk() found no usable stat on disk
            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                # The "mode & lnkkind != lnkkind or self._checklink"
                # lines are an expansion of "islink => checklink"
                # where islink means "is this a link?" and checklink
                # means "can we check links?".
                if (size >= 0 and
                    (size != st.st_size
                     or ((mode ^ st.st_mode) & 0100 and self._checkexec))
                    and (mode & lnkkind != lnkkind or self._checklink)
                    or size == -2 # other parent
                    or fn in self._copymap):
                    madd(fn)
                elif (time != int(st.st_mtime)
                      and (mode & lnkkind != lnkkind or self._checklink)):
                    # same size, different mtime: must read to be sure
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, modified, added, removed, deleted, unknown, ignored,
                clean)
@@ -1,103 +1,103 b''
1 1 # help.py - help data for mercurial
2 2 #
3 3 # Copyright 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import gettext, _
9 9 import sys, os
10 10 import extensions
11 11
12 12
def moduledoc(file):
    '''return the top-level python documentation for the given file

    Loosely inspired by pydoc.source_synopsis(), but rewritten to
    handle triple quotes and to return the whole text instead of just
    the synopsis.

    Returns None when the file has no leading docstring or when the
    docstring's closing delimiter is never found.'''
    result = []

    # skip leading comment lines and blank lines
    line = file.readline()
    while line[:1] == '#' or not line.strip():
        line = file.readline()
        if not line:
            break

    start = line[:3]
    if start in ('"""', "'''"):
        line = line[3:]
        while line:
            if line.rstrip().endswith(start):
                line = line.split(start)[0]
                if line:
                    result.append(line)
                break
            result.append(line)
            line = file.readline()
        else:
            # EOF reached without a closing delimiter.  The old code's
            # "elif not line" check for this was unreachable (the while
            # condition guarantees line is non-empty), so truncated
            # docstrings were silently returned; honor the original
            # intent and report the unmatched delimiter instead.
            return None # unmatched delimiter
    else:
        return None

    return ''.join(result)
44 44
def listexts(header, exts, maxlength, indent=1):
    '''return a text listing of the given extensions

    header - section title printed above the list
    exts - mapping of extension name to one-line description
    maxlength - length of the longest extension name (for alignment)
    indent - number of leading spaces per entry

    Returns '' when exts is empty.  Uses items() rather than the
    Python-2-only iteritems(); sorted() makes the result identical.
    '''
    if not exts:
        return ''
    result = '\n%s\n\n' % header
    for name, desc in sorted(exts.items()):
        result += '%s%-*s %s\n' % (' ' * indent, maxlength + 2,
                                   ':%s:' % name, desc)
    return result
54 54
def extshelp():
    '''Build the text for the "extensions" help topic, appending the
    lists of currently enabled and disabled extensions.'''
    doc = loaddoc('extensions')()
    enabled, maxlen = extensions.enabled()
    doc += listexts(_('enabled extensions:'), enabled, maxlen)
    disabled, maxlen = extensions.disabled()
    doc += listexts(_('disabled extensions:'), disabled, maxlen)
    return doc
65 65
def loaddoc(topic):
    """Return a delayed loader for help/topic.txt."""

    def loader():
        # frozen (py2exe-style) builds keep data beside the executable
        if hasattr(sys, 'frozen'):
            module = sys.executable
        else:
            module = __file__
        base = os.path.dirname(module)

        # the help directory sits next to this module in an installed
        # tree, or one level up in a source checkout
        docdir = None
        for candidate in ('.', '..'):
            docdir = os.path.join(base, candidate, 'help')
            if os.path.isdir(docdir):
                break

        path = os.path.join(docdir, topic + ".txt")
        return gettext(open(path).read())
    return loader
84 84
# help topic table: (list of topic aliases, localized title, callable
# returning the topic text -- loaddoc() defers reading help/*.txt
# until the topic is actually requested)
helptable = [
    (["config", "hgrc"], _("Configuration Files"), loaddoc('config')),
    (["dates"], _("Date Formats"), loaddoc('dates')),
    (["patterns"], _("File Name Patterns"), loaddoc('patterns')),
    (['environment', 'env'], _('Environment Variables'),
     loaddoc('environment')),
    (['revs', 'revisions'], _('Specifying Single Revisions'),
     loaddoc('revisions')),
    (['mrevs', 'multirevs'], _('Specifying Multiple Revisions'),
     loaddoc('multirevs')),
    (['revsets'], _("Specifying Revision Sets"), loaddoc('revsets')),
    (['diffs'], _('Diff Formats'), loaddoc('diffs')),
    (['templating', 'templates'], _('Template Usage'),
     loaddoc('templates')),
    (['urls'], _('URL Paths'), loaddoc('urls')),
    (["extensions"], _("Using additional features"), extshelp),
    (["hgweb"], _("Configuring hgweb"), loaddoc('hgweb')),
    (["glossary"], _("Glossary"), loaddoc('glossary')),
]
@@ -1,539 +1,539 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, hex, bin
9 9 from i18n import _
10 10 import util, filemerge, copies, subrepo
11 11 import errno, os, shutil
12 12
class mergestate(object):
    '''track 3-way merge state of individual files

    State is persisted in .hg/merge/state so an interrupted merge can
    be resumed.  Each file's state character is 'u' (unresolved) or
    'r' (resolved); pre-merge local versions are backed up under
    .hg/merge/<sha1 of path>.
    '''
    def __init__(self, repo):
        self._repo = repo
        self._dirty = False
        self._read()
    def reset(self, node=None):
        # forget all per-file state and delete any saved backups
        self._state = {}
        if node:
            self._local = node
        shutil.rmtree(self._repo.join("merge"), True)
        self._dirty = False
    def _read(self):
        # load merge/state: line 0 is the hex local node; every other
        # line is a NUL-separated record keyed by destination file
        self._state = {}
        try:
            f = self._repo.opener("merge/state")
            for i, l in enumerate(f):
                if i == 0:
                    self._local = bin(l[:-1])
                else:
                    bits = l[:-1].split("\0")
                    self._state[bits[0]] = bits[1:]
        except IOError, err:
            # a missing state file just means no merge in progress
            if err.errno != errno.ENOENT:
                raise
        self._dirty = False
    def commit(self):
        # write the state back out, if anything changed
        if self._dirty:
            f = self._repo.opener("merge/state", "w")
            f.write(hex(self._local) + "\n")
            for d, v in self._state.iteritems():
                f.write("\0".join([d] + v) + "\n")
            self._dirty = False
    def add(self, fcl, fco, fca, fd, flags):
        # back up the local file contents and record an unresolved
        # entry for destination fd
        hash = util.sha1(fcl.path()).hexdigest()
        self._repo.opener("merge/" + hash, "w").write(fcl.data())
        self._state[fd] = ['u', hash, fcl.path(), fca.path(),
                           hex(fca.filenode()), fco.path(), flags]
        self._dirty = True
    def __contains__(self, dfile):
        return dfile in self._state
    def __getitem__(self, dfile):
        # resolution state character for dfile: 'u' or 'r'
        return self._state[dfile][0]
    def __iter__(self):
        # yield tracked files in sorted order
        l = self._state.keys()
        l.sort()
        for f in l:
            yield f
    def mark(self, dfile, state):
        self._state[dfile][0] = state
        self._dirty = True
    def resolve(self, dfile, wctx, octx):
        # attempt to merge dfile; a falsy filemerge result means
        # success, in which case the file is marked resolved
        if self[dfile] == 'r':
            return 0
        state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
        # restore the backed-up local version before merging
        f = self._repo.opener("merge/" + hash)
        self._repo.wwrite(dfile, f.read(), flags)
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
        if not r:
            self.mark(dfile, 'r')
        return r
77 77
def _checkunknown(wctx, mctx):
    """Abort when an untracked working-directory file would be
    clobbered by a differing file from the target context mctx."""
    for fname in wctx.unknown():
        if fname in mctx and mctx[fname].cmp(wctx[fname]):
            raise util.Abort(_("untracked file in working directory differs"
                               " from file in requested revision: '%s'")
                             % fname)
84 84
85 85 def _checkcollision(mctx):
86 86 "check for case folding collisions in the destination context"
87 87 folded = {}
88 88 for fn in mctx:
89 89 fold = fn.lower()
90 90 if fold in folded:
91 91 raise util.Abort(_("case-folding collision between %s and %s")
92 92 % (fn, folded[fold]))
93 93 folded[fold] = fn
94 94
95 95 def _forgetremoved(wctx, mctx, branchmerge):
96 96 """
97 97 Forget removed files
98 98
99 99 If we're jumping between revisions (as opposed to merging), and if
100 100 neither the working directory nor the target rev has the file,
101 101 then we need to remove it from the dirstate, to prevent the
102 102 dirstate from listing the file when it is no longer in the
103 103 manifest.
104 104
105 105 If we're merging, and the other revision has removed a file
106 106 that is not present in the working directory, we need to mark it
107 107 as removed.
108 108 """
109 109
110 110 action = []
111 111 state = branchmerge and 'r' or 'f'
112 112 for f in wctx.deleted():
113 113 if f not in mctx:
114 114 action.append((f, state))
115 115
116 116 if not branchmerge:
117 117 for f in wctx.removed():
118 118 if f not in mctx:
119 119 action.append((f, "f"))
120 120
121 121 return action
122 122
def manifestmerge(repo, p1, p2, pa, overwrite, partial):
    """
    Merge p1 and p2 with ancestor pa and generate merge action list

    overwrite = whether we clobber working files
    partial = function to filter file lists

    Returns a list of action tuples (file, action code, extra args);
    codes seen here: "e" exec-flag update, "g" get, "m" merge, "d"
    directory rename, "dr" divergent renames, "r" remove, "a" re-add,
    "f" forget.
    """

    def fmerge(f, f2, fa):
        """merge flags"""
        a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
        if m == n: # flags agree
            return m # unchanged
        if m and n and not a: # flags set, don't agree, differ from parent
            r = repo.ui.promptchoice(
                _(" conflicting flags for %s\n"
                  "(n)one, e(x)ec or sym(l)ink?") % f,
                (_("&None"), _("E&xec"), _("Sym&link")), 0)
            if r == 1:
                return "x" # Exec
            if r == 2:
                return "l" # Symlink
            return ""
        if m and m != a: # changed from a to m
            return m
        if n and n != a: # changed from a to n
            return n
        return '' # flag was cleared

    def act(msg, m, f, *args):
        # append one action tuple, logging it for --debug
        repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
        action.append((f, m) + args)

    action, copy = [], {}

    if overwrite:
        pa = p1
    elif pa == p2: # backwards
        pa = p1.p1()
    elif pa and repo.ui.configbool("merge", "followcopies", True):
        dirs = repo.ui.configbool("merge", "followdirs", True)
        copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
        for of, fl in diverge.iteritems():
            act("divergent renames", "dr", of, fl)

    repo.ui.note(_("resolving manifests\n"))
    repo.ui.debug(" overwrite %s partial %s\n" % (overwrite, bool(partial)))
    repo.ui.debug(" ancestor %s local %s remote %s\n" % (pa, p1, p2))

    m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())

    if '.hgsubstate' in m1:
        # check whether sub state is modified
        for s in p1.substate:
            if p1.sub(s).dirty():
                m1['.hgsubstate'] += "+"
                break

    # Compare manifests
    for f, n in m1.iteritems():
        if partial and not partial(f):
            continue
        if f in m2:
            rflags = fmerge(f, f, f)
            a = ma.get(f, nullid)
            if n == m2[f] or m2[f] == a: # same or local newer
                # is file locally modified or flags need changing?
                # dirstate flags may need to be made current
                if m1.flags(f) != rflags or n[20:]:
                    act("update permissions", "e", f, rflags)
            elif n == a: # remote newer
                act("remote is newer", "g", f, rflags)
            else: # both changed
                act("versions differ", "m", f, f, f, rflags, False)
        elif f in copied: # files we'll deal with on m2 side
            pass
        elif f in copy:
            f2 = copy[f]
            if f2 not in m2: # directory rename
                act("remote renamed directory to " + f2, "d",
                    f, None, f2, m1.flags(f))
            else: # case 2 A,B/B/B or case 4,21 A/B/B
                act("local copied/moved to " + f2, "m",
                    f, f2, f, fmerge(f, f2, f2), False)
        elif f in ma: # clean, a different, no remote
            if n != ma[f]:
                if repo.ui.promptchoice(
                    _(" local changed %s which remote deleted\n"
                      "use (c)hanged version or (d)elete?") % f,
                    (_("&Changed"), _("&Delete")), 0):
                    act("prompt delete", "r", f)
                else:
                    act("prompt keep", "a", f)
        elif n[20:] == "a": # added, no remote
            act("remote deleted", "f", f)
        elif n[20:] != "u":
            act("other deleted", "r", f)

    for f, n in m2.iteritems():
        if partial and not partial(f):
            continue
        if f in m1 or f in copied: # files already visited
            continue
        if f in copy:
            f2 = copy[f]
            if f2 not in m1: # directory rename
                act("local renamed directory to " + f2, "d",
                    None, f, f2, m2.flags(f))
            elif f2 in m2: # rename case 1, A/A,B/A
                act("remote copied to " + f, "m",
                    f2, f, f, fmerge(f2, f, f2), False)
            else: # case 3,20 A/B/A
                act("remote moved to " + f, "m",
                    f2, f, f, fmerge(f2, f, f2), True)
        elif f not in ma:
            act("remote created", "g", f, m2.flags(f))
        elif n != ma[f]:
            if repo.ui.promptchoice(
                _("remote changed %s which local deleted\n"
                  "use (c)hanged version or leave (d)eleted?") % f,
                (_("&Changed"), _("&Deleted")), 0) == 0:
                act("prompt recreating", "g", f, m2.flags(f))

    return action
248 248
def actionkey(a):
    # sort key that orders remove actions ("r") before everything else
    if a[1] == 'r':
        return -1, a
    return 0, a
251 251
def applyupdates(repo, action, wctx, mctx, actx):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy
    actx is the context of the common ancestor

    Returns a tuple (updated, merged, removed, unresolved) of counts.
    """

    updated, merged, removed, unresolved = 0, 0, 0, 0
    ms = mergestate(repo)
    ms.reset(wctx.parents()[0].node())
    moves = []
    action.sort(key=actionkey)
    substate = wctx.substate # prime

    # prescan for merges: back up local versions before touching files
    u = repo.ui
    for a in action:
        f, m = a[:2]
        if m == 'm': # merge
            f2, fd, flags, move = a[2:]
            if f == '.hgsubstate': # merged internally
                continue
            repo.ui.debug("preserving %s for resolve of %s\n" % (f, fd))
            fcl = wctx[f]
            fco = mctx[f2]
            if mctx == actx: # backwards, use working dir parent as ancestor
                fca = fcl.parents()[0]
            else:
                fca = fcl.ancestor(fco, actx)
            if not fca:
                fca = repo.filectx(f, fileid=nullrev)
            ms.add(fcl, fco, fca, fd, flags)
            if f != fd and move:
                moves.append(f)

    # remove renamed files after safely stored
    for f in moves:
        if os.path.lexists(repo.wjoin(f)):
            repo.ui.debug("removing %s\n" % f)
            os.unlink(repo.wjoin(f))

    audit_path = util.path_auditor(repo.root)

    numupdates = len(action)
    for i, a in enumerate(action):
        f, m = a[:2]
        u.progress(_('updating'), i + 1, item=f, total=numupdates, unit='files')
        if f and f[0] == "/":
            continue
        if m == "r": # remove
            repo.ui.note(_("removing %s\n") % f)
            audit_path(f)
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx)
            try:
                util.unlink(repo.wjoin(f))
            except OSError, inst:
                # already-missing files are fine; warn on anything else
                if inst.errno != errno.ENOENT:
                    repo.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
            removed += 1
        elif m == "m": # merge
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx))
                continue
            f2, fd, flags, move = a[2:]
            r = ms.resolve(fd, wctx, mctx)
            if r is not None and r > 0:
                unresolved += 1
            else:
                if r is None:
                    # no real conflict: counted as a plain update
                    updated += 1
                else:
                    merged += 1
                util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
            if f != fd and move and os.path.lexists(repo.wjoin(f)):
                repo.ui.debug("removing %s\n" % f)
                os.unlink(repo.wjoin(f))
        elif m == "g": # get
            flags = a[2]
            repo.ui.note(_("getting %s\n") % f)
            t = mctx.filectx(f).data()
            repo.wwrite(f, t, flags)
            t = None
            updated += 1
            if f == '.hgsubstate': # subrepo states need updating
                subrepo.submerge(repo, wctx, mctx, wctx)
        elif m == "d": # directory rename
            f2, fd, flags = a[2:]
            if f:
                repo.ui.note(_("moving %s to %s\n") % (f, fd))
                t = wctx.filectx(f).data()
                repo.wwrite(fd, t, flags)
                util.unlink(repo.wjoin(f))
            if f2:
                repo.ui.note(_("getting %s to %s\n") % (f2, fd))
                t = mctx.filectx(f2).data()
                repo.wwrite(fd, t, flags)
            updated += 1
        elif m == "dr": # divergent renames
            fl = a[2]
            repo.ui.warn(_("warning: detected divergent renames of %s to:\n") % f)
            for nf in fl:
                repo.ui.warn(" %s\n" % nf)
        elif m == "e": # exec
            flags = a[2]
            util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags)
    ms.commit()
    u.progress(_('updating'), None, total=numupdates, unit='files')

    return updated, merged, removed, unresolved
364 364
def recordupdates(repo, action, branchmerge):
    """record merge actions to the dirstate

    Mirrors applyupdates(): for each action tuple, update the dirstate
    entry of the affected file so the next commit records the merge or
    update correctly.
    """

    for a in action:
        f, m = a[:2]
        if m == "r": # remove
            if branchmerge:
                repo.dirstate.remove(f)
            else:
                repo.dirstate.forget(f)
        elif m == "a": # re-add
            if not branchmerge:
                repo.dirstate.add(f)
        elif m == "f": # forget
            repo.dirstate.forget(f)
        elif m == "e": # exec change
            repo.dirstate.normallookup(f)
        elif m == "g": # get
            if branchmerge:
                repo.dirstate.otherparent(f)
            else:
                repo.dirstate.normal(f)
        elif m == "m": # merge
            f2, fd, flag, move = a[2:]
            if branchmerge:
                # We've done a branch merge, mark this file as merged
                # so that we properly record the merger later
                repo.dirstate.merge(fd)
                if f != f2: # copy/rename
                    if move:
                        repo.dirstate.remove(f)
                    if f != fd:
                        repo.dirstate.copy(f, fd)
                    else:
                        repo.dirstate.copy(f2, fd)
            else:
                # We've update-merged a locally modified file, so
                # we set the dirstate to emulate a normal checkout
                # of that file some time in the past. Thus our
                # merge will appear as a normal local file
                # modification.
                if f2 == fd: # file not locally copied/moved
                    repo.dirstate.normallookup(fd)
                if move:
                    repo.dirstate.forget(f)
        elif m == "d": # directory rename
            f2, fd, flag = a[2:]
            if not f2 and f not in repo.dirstate:
                # untracked file moved
                continue
            if branchmerge:
                repo.dirstate.add(fd)
                if f:
                    repo.dirstate.remove(f)
                    repo.dirstate.copy(f, fd)
                if f2:
                    repo.dirstate.copy(f2, fd)
            else:
                repo.dirstate.normal(fd)
                if f:
                    repo.dirstate.forget(f)
426 426
def update(repo, node, branchmerge, force, partial):
    """
    Perform a merge between the working directory and the given node

    node = the node to update to, or None if unspecified
    branchmerge = whether to merge between branches
    force = whether to force branch merging or file overwriting
    partial = a function to filter file lists (dirstate not updated)

    Returns the (updated, merged, removed, unresolved) counts from
    applyupdates().

    The table below shows all the behaviors of the update command
    given the -c and -C or no options, whether the working directory
    is dirty, whether a revision is specified, and the relationship of
    the parent rev to the target rev (linear, on the same named
    branch, or on another named branch).

    This logic is tested by test-update-branches.t.

    -c  -C  dirty  rev  |  linear   same  cross
     n   n    n     n   |    ok     (1)     x
     n   n    n     y   |    ok     ok     ok
     n   n    y     *   |   merge   (2)    (2)
     n   y    *     *   |    ---  discard  ---
     y   n    y     *   |    ---    (3)    ---
     y   n    n     *   |    ---    ok     ---
     y   y    *     *   |    ---    (4)    ---

    x = can't happen
    * = don't-care
    1 = abort: crosses branches (use 'hg merge' or 'hg update -c')
    2 = abort: crosses branches (use 'hg merge' to merge or
                 use 'hg update -C' to discard changes)
    3 = abort: uncommitted local changes
    4 = incompatible options (checked in commands.py)
    """

    onode = node
    wlock = repo.wlock()
    try:
        wc = repo[None]
        if node is None:
            # tip of current branch
            try:
                node = repo.branchtags()[wc.branch()]
            except KeyError:
                if wc.branch() == "default": # no default branch!
                    node = repo.lookup("tip") # update to tip
                else:
                    raise util.Abort(_("branch %s not found") % wc.branch())
        overwrite = force and not branchmerge
        pl = wc.parents()
        p1, p2 = pl[0], repo[node]
        pa = p1.ancestor(p2)
        fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
        fastforward = False

        ### check phase
        if not overwrite and len(pl) > 1:
            raise util.Abort(_("outstanding uncommitted merges"))
        if branchmerge:
            if pa == p2:
                raise util.Abort(_("merging with a working directory ancestor"
                                   " has no effect"))
            elif pa == p1:
                if p1.branch() != p2.branch():
                    # merging a descendant on another named branch
                    fastforward = True
                else:
                    raise util.Abort(_("nothing to merge (use 'hg update'"
                                       " or check 'hg heads')"))
            if not force and (wc.files() or wc.deleted()):
                raise util.Abort(_("outstanding uncommitted changes "
                                   "(use 'hg status' to list changes)"))
        elif not overwrite:
            if pa in (p1, p2): # linear
                pass # all good
            elif wc.files() or wc.deleted():
                raise util.Abort(_("crosses branches (use 'hg merge' to merge "
                                   "or use 'hg update -C' to discard changes)"))
            elif onode is None:
                raise util.Abort(_("crosses branches (use 'hg merge' or use "
                                   "'hg update -c')"))
            else:
                # Allow jumping branches if clean and specific rev given
                overwrite = True

        ### calculate phase
        action = []
        wc.status(unknown=True) # prime cache
        if not force:
            _checkunknown(wc, p2)
        if not util.checkcase(repo.path):
            _checkcollision(p2)
        action += _forgetremoved(wc, p2, branchmerge)
        action += manifestmerge(repo, wc, p2, pa, overwrite, partial)

        ### apply phase
        if not branchmerge: # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
        if not partial:
            repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)

        stats = applyupdates(repo, action, wc, p2, pa)

        if not partial:
            repo.dirstate.setparents(fp1, fp2)
            recordupdates(repo, action, branchmerge)
            if not branchmerge and not fastforward:
                repo.dirstate.setbranch(p2.branch())
    finally:
        wlock.release()

    if not partial:
        repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
    return stats
@@ -1,60 +1,60 b''
1 1 # diffhelpers.py - pure Python implementation of diffhelpers.c
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 def addlines(fp, hunk, lena, lenb, a, b):
9 9 while True:
10 10 todoa = lena - len(a)
11 11 todob = lenb - len(b)
12 12 num = max(todoa, todob)
13 13 if num == 0:
14 14 break
15 15 for i in xrange(num):
16 16 s = fp.readline()
17 17 c = s[0]
18 18 if s == "\\ No newline at end of file\n":
19 19 fix_newline(hunk, a, b)
20 20 continue
21 21 if c == "\n":
22 22 # Some patches may be missing the control char
23 23 # on empty lines. Supply a leading space.
24 24 s = " \n"
25 25 hunk.append(s)
26 26 if c == "+":
27 27 b.append(s[1:])
28 28 elif c == "-":
29 29 a.append(s)
30 30 else:
31 31 b.append(s[1:])
32 32 a.append(s)
33 33 return 0
34 34
35 35 def fix_newline(hunk, a, b):
36 36 l = hunk[-1]
37 37 # tolerate CRLF in last line
38 38 if l.endswith('\r\n'):
39 39 hline = l[:-2]
40 40 else:
41 41 hline = l[:-1]
42 42 c = hline[0]
43 43
44 if c == " " or c == "+":
44 if c in " +":
45 45 b[-1] = hline[1:]
46 if c == " " or c == "-":
46 if c in " -":
47 47 a[-1] = hline
48 48 hunk[-1] = hline
49 49 return 0
50 50
51 51
52 52 def testhunk(a, b, bstart):
53 53 alen = len(a)
54 54 blen = len(b)
55 55 if alen > blen - bstart:
56 56 return -1
57 57 for i in xrange(alen):
58 58 if a[i][1:] != b[i + bstart]:
59 59 return -1
60 60 return 0
@@ -1,591 +1,591 b''
1 1 # revset.py - revision set queries for mercurial
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9 import parser, util, error, discovery
10 10 import match as matchmod
11 11 from i18n import _
12 12
13 13 elements = {
14 14 "(": (20, ("group", 1, ")"), ("func", 1, ")")),
15 15 "-": (19, ("negate", 19), ("minus", 19)),
16 16 "::": (17, ("dagrangepre", 17), ("dagrange", 17),
17 17 ("dagrangepost", 17)),
18 18 "..": (17, ("dagrangepre", 17), ("dagrange", 17),
19 19 ("dagrangepost", 17)),
20 20 ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
21 21 "not": (10, ("not", 10)),
22 22 "!": (10, ("not", 10)),
23 23 "and": (5, None, ("and", 5)),
24 24 "&": (5, None, ("and", 5)),
25 25 "or": (4, None, ("or", 4)),
26 26 "|": (4, None, ("or", 4)),
27 27 "+": (4, None, ("or", 4)),
28 28 ",": (2, None, ("list", 2)),
29 29 ")": (0, None, None),
30 30 "symbol": (0, ("symbol",), None),
31 31 "string": (0, ("string",), None),
32 32 "end": (0, None, None),
33 33 }
34 34
35 35 keywords = set(['and', 'or', 'not'])
36 36
37 37 def tokenize(program):
38 38 pos, l = 0, len(program)
39 39 while pos < l:
40 40 c = program[pos]
41 41 if c.isspace(): # skip inter-token whitespace
42 42 pass
43 43 elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
44 44 yield ('::', None, pos)
45 45 pos += 1 # skip ahead
46 46 elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
47 47 yield ('..', None, pos)
48 48 pos += 1 # skip ahead
49 49 elif c in "():,-|&+!": # handle simple operators
50 50 yield (c, None, pos)
51 51 elif c in '"\'': # handle quoted strings
52 52 pos += 1
53 53 s = pos
54 54 while pos < l: # find closing quote
55 55 d = program[pos]
56 56 if d == '\\': # skip over escaped characters
57 57 pos += 2
58 58 continue
59 59 if d == c:
60 60 yield ('string', program[s:pos].decode('string-escape'), s)
61 61 break
62 62 pos += 1
63 63 else:
64 64 raise error.ParseError(_("unterminated string"), s)
65 65 elif c.isalnum() or c in '._' or ord(c) > 127: # gather up a symbol/keyword
66 66 s = pos
67 67 pos += 1
68 68 while pos < l: # find end of symbol
69 69 d = program[pos]
70 70 if not (d.isalnum() or d in "._" or ord(d) > 127):
71 71 break
72 72 if d == '.' and program[pos - 1] == '.': # special case for ..
73 73 pos -= 1
74 74 break
75 75 pos += 1
76 76 sym = program[s:pos]
77 77 if sym in keywords: # operator keywords
78 78 yield (sym, None, s)
79 79 else:
80 80 yield ('symbol', sym, s)
81 81 pos -= 1
82 82 else:
83 83 raise error.ParseError(_("syntax error"), pos)
84 84 pos += 1
85 85 yield ('end', None, pos)
86 86
87 87 # helpers
88 88
89 89 def getstring(x, err):
90 if x and (x[0] == 'string' or x[0] == 'symbol'):
90 if x and x[0] in ('string', 'symbol'):
91 91 return x[1]
92 92 raise error.ParseError(err)
93 93
94 94 def getlist(x):
95 95 if not x:
96 96 return []
97 97 if x[0] == 'list':
98 98 return getlist(x[1]) + [x[2]]
99 99 return [x]
100 100
101 101 def getargs(x, min, max, err):
102 102 l = getlist(x)
103 103 if len(l) < min or len(l) > max:
104 104 raise error.ParseError(err)
105 105 return l
106 106
107 107 def getset(repo, subset, x):
108 108 if not x:
109 109 raise error.ParseError(_("missing argument"))
110 110 return methods[x[0]](repo, subset, *x[1:])
111 111
112 112 # operator methods
113 113
114 114 def stringset(repo, subset, x):
115 115 x = repo[x].rev()
116 116 if x == -1 and len(subset) == len(repo):
117 117 return [-1]
118 118 if x in subset:
119 119 return [x]
120 120 return []
121 121
122 122 def symbolset(repo, subset, x):
123 123 if x in symbols:
124 124 raise error.ParseError(_("can't use %s here") % x)
125 125 return stringset(repo, subset, x)
126 126
127 127 def rangeset(repo, subset, x, y):
128 128 m = getset(repo, subset, x)
129 129 if not m:
130 130 m = getset(repo, range(len(repo)), x)
131 131
132 132 n = getset(repo, subset, y)
133 133 if not n:
134 134 n = getset(repo, range(len(repo)), y)
135 135
136 136 if not m or not n:
137 137 return []
138 138 m, n = m[0], n[-1]
139 139
140 140 if m < n:
141 141 r = range(m, n + 1)
142 142 else:
143 143 r = range(m, n - 1, -1)
144 144 s = set(subset)
145 145 return [x for x in r if x in s]
146 146
147 147 def andset(repo, subset, x, y):
148 148 return getset(repo, getset(repo, subset, x), y)
149 149
150 150 def orset(repo, subset, x, y):
151 151 s = set(getset(repo, subset, x))
152 152 s |= set(getset(repo, [r for r in subset if r not in s], y))
153 153 return [r for r in subset if r in s]
154 154
155 155 def notset(repo, subset, x):
156 156 s = set(getset(repo, subset, x))
157 157 return [r for r in subset if r not in s]
158 158
159 159 def listset(repo, subset, a, b):
160 160 raise error.ParseError(_("can't use a list in this context"))
161 161
162 162 def func(repo, subset, a, b):
163 163 if a[0] == 'symbol' and a[1] in symbols:
164 164 return symbols[a[1]](repo, subset, b)
165 165 raise error.ParseError(_("not a function: %s") % a[1])
166 166
167 167 # functions
168 168
169 169 def p1(repo, subset, x):
170 170 ps = set()
171 171 cl = repo.changelog
172 172 for r in getset(repo, subset, x):
173 173 ps.add(cl.parentrevs(r)[0])
174 174 return [r for r in subset if r in ps]
175 175
176 176 def p2(repo, subset, x):
177 177 ps = set()
178 178 cl = repo.changelog
179 179 for r in getset(repo, subset, x):
180 180 ps.add(cl.parentrevs(r)[1])
181 181 return [r for r in subset if r in ps]
182 182
183 183 def parents(repo, subset, x):
184 184 ps = set()
185 185 cl = repo.changelog
186 186 for r in getset(repo, subset, x):
187 187 ps.update(cl.parentrevs(r))
188 188 return [r for r in subset if r in ps]
189 189
190 190 def maxrev(repo, subset, x):
191 191 s = getset(repo, subset, x)
192 192 if s:
193 193 m = max(s)
194 194 if m in subset:
195 195 return [m]
196 196 return []
197 197
198 198 def minrev(repo, subset, x):
199 199 s = getset(repo, subset, x)
200 200 if s:
201 201 m = min(s)
202 202 if m in subset:
203 203 return [m]
204 204 return []
205 205
206 206 def limit(repo, subset, x):
207 207 l = getargs(x, 2, 2, _("limit wants two arguments"))
208 208 try:
209 209 lim = int(getstring(l[1], _("limit wants a number")))
210 210 except ValueError:
211 211 raise error.ParseError(_("limit expects a number"))
212 212 return getset(repo, subset, l[0])[:lim]
213 213
214 214 def children(repo, subset, x):
215 215 cs = set()
216 216 cl = repo.changelog
217 217 s = set(getset(repo, subset, x))
218 218 for r in xrange(0, len(repo)):
219 219 for p in cl.parentrevs(r):
220 220 if p in s:
221 221 cs.add(r)
222 222 return [r for r in subset if r in cs]
223 223
224 224 def branch(repo, subset, x):
225 225 s = getset(repo, range(len(repo)), x)
226 226 b = set()
227 227 for r in s:
228 228 b.add(repo[r].branch())
229 229 s = set(s)
230 230 return [r for r in subset if r in s or repo[r].branch() in b]
231 231
232 232 def ancestor(repo, subset, x):
233 233 l = getargs(x, 2, 2, _("ancestor wants two arguments"))
234 234 r = range(len(repo))
235 235 a = getset(repo, r, l[0])
236 236 b = getset(repo, r, l[1])
237 237 if len(a) != 1 or len(b) != 1:
238 238 raise error.ParseError(_("ancestor arguments must be single revisions"))
239 239 an = [repo[a[0]].ancestor(repo[b[0]]).rev()]
240 240
241 241 return [r for r in an if r in subset]
242 242
243 243 def ancestors(repo, subset, x):
244 244 args = getset(repo, range(len(repo)), x)
245 245 if not args:
246 246 return []
247 247 s = set(repo.changelog.ancestors(*args)) | set(args)
248 248 return [r for r in subset if r in s]
249 249
250 250 def descendants(repo, subset, x):
251 251 args = getset(repo, range(len(repo)), x)
252 252 if not args:
253 253 return []
254 254 s = set(repo.changelog.descendants(*args)) | set(args)
255 255 return [r for r in subset if r in s]
256 256
257 257 def follow(repo, subset, x):
258 258 getargs(x, 0, 0, _("follow takes no arguments"))
259 259 p = repo['.'].rev()
260 260 s = set(repo.changelog.ancestors(p)) | set([p])
261 261 return [r for r in subset if r in s]
262 262
263 263 def date(repo, subset, x):
264 264 ds = getstring(x, _("date wants a string"))
265 265 dm = util.matchdate(ds)
266 266 return [r for r in subset if dm(repo[r].date()[0])]
267 267
268 268 def keyword(repo, subset, x):
269 269 kw = getstring(x, _("keyword wants a string")).lower()
270 270 l = []
271 271 for r in subset:
272 272 c = repo[r]
273 273 t = " ".join(c.files() + [c.user(), c.description()])
274 274 if kw in t.lower():
275 275 l.append(r)
276 276 return l
277 277
278 278 def grep(repo, subset, x):
279 279 try:
280 280 gr = re.compile(getstring(x, _("grep wants a string")))
281 281 except re.error, e:
282 282 raise error.ParseError(_('invalid match pattern: %s') % e)
283 283 l = []
284 284 for r in subset:
285 285 c = repo[r]
286 286 for e in c.files() + [c.user(), c.description()]:
287 287 if gr.search(e):
288 288 l.append(r)
289 289 continue
290 290 return l
291 291
292 292 def author(repo, subset, x):
293 293 n = getstring(x, _("author wants a string")).lower()
294 294 return [r for r in subset if n in repo[r].user().lower()]
295 295
296 296 def hasfile(repo, subset, x):
297 297 pat = getstring(x, _("file wants a pattern"))
298 298 m = matchmod.match(repo.root, repo.getcwd(), [pat])
299 299 s = []
300 300 for r in subset:
301 301 for f in repo[r].files():
302 302 if m(f):
303 303 s.append(r)
304 304 continue
305 305 return s
306 306
307 307 def contains(repo, subset, x):
308 308 pat = getstring(x, _("contains wants a pattern"))
309 309 m = matchmod.match(repo.root, repo.getcwd(), [pat])
310 310 s = []
311 311 if m.files() == [pat]:
312 312 for r in subset:
313 313 if pat in repo[r]:
314 314 s.append(r)
315 315 continue
316 316 else:
317 317 for r in subset:
318 318 for f in repo[r].manifest():
319 319 if m(f):
320 320 s.append(r)
321 321 continue
322 322 return s
323 323
324 324 def checkstatus(repo, subset, pat, field):
325 325 m = matchmod.match(repo.root, repo.getcwd(), [pat])
326 326 s = []
327 327 fast = (m.files() == [pat])
328 328 for r in subset:
329 329 c = repo[r]
330 330 if fast:
331 331 if pat not in c.files():
332 332 continue
333 333 else:
334 334 for f in c.files():
335 335 if m(f):
336 336 break
337 337 else:
338 338 continue
339 339 files = repo.status(c.p1().node(), c.node())[field]
340 340 if fast:
341 341 if pat in files:
342 342 s.append(r)
343 343 continue
344 344 else:
345 345 for f in files:
346 346 if m(f):
347 347 s.append(r)
348 348 continue
349 349 return s
350 350
351 351 def modifies(repo, subset, x):
352 352 pat = getstring(x, _("modifies wants a pattern"))
353 353 return checkstatus(repo, subset, pat, 0)
354 354
355 355 def adds(repo, subset, x):
356 356 pat = getstring(x, _("adds wants a pattern"))
357 357 return checkstatus(repo, subset, pat, 1)
358 358
359 359 def removes(repo, subset, x):
360 360 pat = getstring(x, _("removes wants a pattern"))
361 361 return checkstatus(repo, subset, pat, 2)
362 362
363 363 def merge(repo, subset, x):
364 364 getargs(x, 0, 0, _("merge takes no arguments"))
365 365 cl = repo.changelog
366 366 return [r for r in subset if cl.parentrevs(r)[1] != -1]
367 367
368 368 def closed(repo, subset, x):
369 369 getargs(x, 0, 0, _("closed takes no arguments"))
370 370 return [r for r in subset if repo[r].extra().get('close')]
371 371
372 372 def head(repo, subset, x):
373 373 getargs(x, 0, 0, _("head takes no arguments"))
374 374 hs = set()
375 375 for b, ls in repo.branchmap().iteritems():
376 376 hs.update(repo[h].rev() for h in ls)
377 377 return [r for r in subset if r in hs]
378 378
379 379 def reverse(repo, subset, x):
380 380 l = getset(repo, subset, x)
381 381 l.reverse()
382 382 return l
383 383
384 384 def present(repo, subset, x):
385 385 try:
386 386 return getset(repo, subset, x)
387 387 except error.RepoLookupError:
388 388 return []
389 389
390 390 def sort(repo, subset, x):
391 391 l = getargs(x, 1, 2, _("sort wants one or two arguments"))
392 392 keys = "rev"
393 393 if len(l) == 2:
394 394 keys = getstring(l[1], _("sort spec must be a string"))
395 395
396 396 s = l[0]
397 397 keys = keys.split()
398 398 l = []
399 399 def invert(s):
400 400 return "".join(chr(255 - ord(c)) for c in s)
401 401 for r in getset(repo, subset, s):
402 402 c = repo[r]
403 403 e = []
404 404 for k in keys:
405 405 if k == 'rev':
406 406 e.append(r)
407 407 elif k == '-rev':
408 408 e.append(-r)
409 409 elif k == 'branch':
410 410 e.append(c.branch())
411 411 elif k == '-branch':
412 412 e.append(invert(c.branch()))
413 413 elif k == 'desc':
414 414 e.append(c.description())
415 415 elif k == '-desc':
416 416 e.append(invert(c.description()))
417 417 elif k in 'user author':
418 418 e.append(c.user())
419 419 elif k in '-user -author':
420 420 e.append(invert(c.user()))
421 421 elif k == 'date':
422 422 e.append(c.date()[0])
423 423 elif k == '-date':
424 424 e.append(-c.date()[0])
425 425 else:
426 426 raise error.ParseError(_("unknown sort key %r") % k)
427 427 e.append(r)
428 428 l.append(e)
429 429 l.sort()
430 430 return [e[-1] for e in l]
431 431
432 432 def getall(repo, subset, x):
433 433 getargs(x, 0, 0, _("all takes no arguments"))
434 434 return subset
435 435
436 436 def heads(repo, subset, x):
437 437 s = getset(repo, subset, x)
438 438 ps = set(parents(repo, subset, x))
439 439 return [r for r in s if r not in ps]
440 440
441 441 def roots(repo, subset, x):
442 442 s = getset(repo, subset, x)
443 443 cs = set(children(repo, subset, x))
444 444 return [r for r in s if r not in cs]
445 445
446 446 def outgoing(repo, subset, x):
447 447 import hg # avoid start-up nasties
448 448 l = getargs(x, 0, 1, _("outgoing wants a repository path"))
449 449 dest = l and getstring(l[0], _("outgoing wants a repository path")) or ''
450 450 dest = repo.ui.expandpath(dest or 'default-push', dest or 'default')
451 451 dest, branches = hg.parseurl(dest)
452 452 other = hg.repository(hg.remoteui(repo, {}), dest)
453 453 repo.ui.pushbuffer()
454 454 o = discovery.findoutgoing(repo, other)
455 455 repo.ui.popbuffer()
456 456 cl = repo.changelog
457 457 o = set([cl.rev(r) for r in repo.changelog.nodesbetween(o, None)[0]])
458 458 return [r for r in subset if r in o]
459 459
460 460 def tagged(repo, subset, x):
461 461 getargs(x, 0, 0, _("tagged takes no arguments"))
462 462 cl = repo.changelog
463 463 s = set([cl.rev(n) for t, n in repo.tagslist() if t != 'tip'])
464 464 return [r for r in subset if r in s]
465 465
466 466 symbols = {
467 467 "adds": adds,
468 468 "all": getall,
469 469 "ancestor": ancestor,
470 470 "ancestors": ancestors,
471 471 "author": author,
472 472 "branch": branch,
473 473 "children": children,
474 474 "closed": closed,
475 475 "contains": contains,
476 476 "date": date,
477 477 "descendants": descendants,
478 478 "file": hasfile,
479 479 "follow": follow,
480 480 "grep": grep,
481 481 "head": head,
482 482 "heads": heads,
483 483 "keyword": keyword,
484 484 "limit": limit,
485 485 "max": maxrev,
486 486 "min": minrev,
487 487 "merge": merge,
488 488 "modifies": modifies,
489 489 "outgoing": outgoing,
490 490 "p1": p1,
491 491 "p2": p2,
492 492 "parents": parents,
493 493 "present": present,
494 494 "removes": removes,
495 495 "reverse": reverse,
496 496 "roots": roots,
497 497 "sort": sort,
498 498 "tagged": tagged,
499 499 "user": author,
500 500 }
501 501
502 502 methods = {
503 503 "range": rangeset,
504 504 "string": stringset,
505 505 "symbol": symbolset,
506 506 "and": andset,
507 507 "or": orset,
508 508 "not": notset,
509 509 "list": listset,
510 510 "func": func,
511 511 }
512 512
513 513 def optimize(x, small):
514 514 if x == None:
515 515 return 0, x
516 516
517 517 smallbonus = 1
518 518 if small:
519 519 smallbonus = .5
520 520
521 521 op = x[0]
522 522 if op == 'minus':
523 523 return optimize(('and', x[1], ('not', x[2])), small)
524 524 elif op == 'dagrange':
525 525 return optimize(('and', ('func', ('symbol', 'descendants'), x[1]),
526 526 ('func', ('symbol', 'ancestors'), x[2])), small)
527 527 elif op == 'dagrangepre':
528 528 return optimize(('func', ('symbol', 'ancestors'), x[1]), small)
529 529 elif op == 'dagrangepost':
530 530 return optimize(('func', ('symbol', 'descendants'), x[1]), small)
531 531 elif op == 'rangepre':
532 532 return optimize(('range', ('string', '0'), x[1]), small)
533 533 elif op == 'rangepost':
534 534 return optimize(('range', x[1], ('string', 'tip')), small)
535 535 elif op == 'negate':
536 536 return optimize(('string',
537 537 '-' + getstring(x[1], _("can't negate that"))), small)
538 538 elif op in 'string symbol negate':
539 539 return smallbonus, x # single revisions are small
540 elif op == 'and' or op == 'dagrange':
540 elif op in ('and', 'dagrange'):
541 541 wa, ta = optimize(x[1], True)
542 542 wb, tb = optimize(x[2], True)
543 543 w = min(wa, wb)
544 544 if wa > wb:
545 545 return w, (op, tb, ta)
546 546 return w, (op, ta, tb)
547 547 elif op == 'or':
548 548 wa, ta = optimize(x[1], False)
549 549 wb, tb = optimize(x[2], False)
550 550 if wb < wa:
551 551 wb, wa = wa, wb
552 552 return max(wa, wb), (op, ta, tb)
553 553 elif op == 'not':
554 554 o = optimize(x[1], not small)
555 555 return o[0], (op, o[1])
556 556 elif op == 'group':
557 557 return optimize(x[1], small)
558 558 elif op in 'range list':
559 559 wa, ta = optimize(x[1], small)
560 560 wb, tb = optimize(x[2], small)
561 561 return wa + wb, (op, ta, tb)
562 562 elif op == 'func':
563 563 f = getstring(x[1], _("not a symbol"))
564 564 wa, ta = optimize(x[2], small)
565 565 if f in "grep date user author keyword branch file outgoing":
566 566 w = 10 # slow
567 567 elif f in "modifies adds removes":
568 568 w = 30 # slower
569 569 elif f == "contains":
570 570 w = 100 # very slow
571 571 elif f == "ancestor":
572 572 w = 1 * smallbonus
573 573 elif f == "reverse limit":
574 574 w = 0
575 575 elif f in "sort":
576 576 w = 10 # assume most sorts look at changelog
577 577 else:
578 578 w = 1
579 579 return w + wa, (op, x[1], ta)
580 580 return 1, x
581 581
582 582 parse = parser.parser(tokenize, elements).parse
583 583
584 584 def match(spec):
585 585 if not spec:
586 586 raise error.ParseError(_("empty query"))
587 587 tree = parse(spec)
588 588 weight, tree = optimize(tree, True)
589 589 def mfunc(repo, subset):
590 590 return getset(repo, subset, tree)
591 591 return mfunc
@@ -1,450 +1,450 b''
1 1 # Copyright (C) 2004, 2005 Canonical Ltd
2 2 #
3 3 # This program is free software; you can redistribute it and/or modify
4 4 # it under the terms of the GNU General Public License as published by
5 5 # the Free Software Foundation; either version 2 of the License, or
6 6 # (at your option) any later version.
7 7 #
8 8 # This program is distributed in the hope that it will be useful,
9 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 11 # GNU General Public License for more details.
12 12 #
13 13 # You should have received a copy of the GNU General Public License
14 14 # along with this program; if not, write to the Free Software
15 15 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 16
17 17 # mbp: "you know that thing where cvs gives you conflict markers?"
18 18 # s: "i hate that."
19 19
20 20 from i18n import _
21 21 import util, mdiff
22 22 import sys, os
23 23
24 24 class CantReprocessAndShowBase(Exception):
25 25 pass
26 26
27 27 def intersect(ra, rb):
28 28 """Given two ranges return the range where they intersect or None.
29 29
30 30 >>> intersect((0, 10), (0, 6))
31 31 (0, 6)
32 32 >>> intersect((0, 10), (5, 15))
33 33 (5, 10)
34 34 >>> intersect((0, 10), (10, 15))
35 35 >>> intersect((0, 9), (10, 15))
36 36 >>> intersect((0, 9), (7, 15))
37 37 (7, 9)
38 38 """
39 39 assert ra[0] <= ra[1]
40 40 assert rb[0] <= rb[1]
41 41
42 42 sa = max(ra[0], rb[0])
43 43 sb = min(ra[1], rb[1])
44 44 if sa < sb:
45 45 return sa, sb
46 46 else:
47 47 return None
48 48
49 49 def compare_range(a, astart, aend, b, bstart, bend):
50 50 """Compare a[astart:aend] == b[bstart:bend], without slicing.
51 51 """
52 52 if (aend - astart) != (bend - bstart):
53 53 return False
54 54 for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
55 55 if a[ia] != b[ib]:
56 56 return False
57 57 else:
58 58 return True
59 59
60 60 class Merge3Text(object):
61 61 """3-way merge of texts.
62 62
63 63 Given strings BASE, OTHER, THIS, tries to produce a combined text
64 64 incorporating the changes from both BASE->OTHER and BASE->THIS."""
65 65 def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
66 66 self.basetext = basetext
67 67 self.atext = atext
68 68 self.btext = btext
69 69 if base is None:
70 70 base = mdiff.splitnewlines(basetext)
71 71 if a is None:
72 72 a = mdiff.splitnewlines(atext)
73 73 if b is None:
74 74 b = mdiff.splitnewlines(btext)
75 75 self.base = base
76 76 self.a = a
77 77 self.b = b
78 78
79 79 def merge_lines(self,
80 80 name_a=None,
81 81 name_b=None,
82 82 name_base=None,
83 83 start_marker='<<<<<<<',
84 84 mid_marker='=======',
85 85 end_marker='>>>>>>>',
86 86 base_marker=None,
87 87 reprocess=False):
88 88 """Return merge in cvs-like form.
89 89 """
90 90 self.conflicts = False
91 91 newline = '\n'
92 92 if len(self.a) > 0:
93 93 if self.a[0].endswith('\r\n'):
94 94 newline = '\r\n'
95 95 elif self.a[0].endswith('\r'):
96 96 newline = '\r'
97 97 if base_marker and reprocess:
98 98 raise CantReprocessAndShowBase()
99 99 if name_a:
100 100 start_marker = start_marker + ' ' + name_a
101 101 if name_b:
102 102 end_marker = end_marker + ' ' + name_b
103 103 if name_base and base_marker:
104 104 base_marker = base_marker + ' ' + name_base
105 105 merge_regions = self.merge_regions()
106 106 if reprocess is True:
107 107 merge_regions = self.reprocess_merge_regions(merge_regions)
108 108 for t in merge_regions:
109 109 what = t[0]
110 110 if what == 'unchanged':
111 111 for i in range(t[1], t[2]):
112 112 yield self.base[i]
113 elif what == 'a' or what == 'same':
113 elif what in ('a', 'same'):
114 114 for i in range(t[1], t[2]):
115 115 yield self.a[i]
116 116 elif what == 'b':
117 117 for i in range(t[1], t[2]):
118 118 yield self.b[i]
119 119 elif what == 'conflict':
120 120 self.conflicts = True
121 121 yield start_marker + newline
122 122 for i in range(t[3], t[4]):
123 123 yield self.a[i]
124 124 if base_marker is not None:
125 125 yield base_marker + newline
126 126 for i in range(t[1], t[2]):
127 127 yield self.base[i]
128 128 yield mid_marker + newline
129 129 for i in range(t[5], t[6]):
130 130 yield self.b[i]
131 131 yield end_marker + newline
132 132 else:
133 133 raise ValueError(what)
134 134
135 135 def merge_annotated(self):
136 136 """Return merge with conflicts, showing origin of lines.
137 137
138 138 Most useful for debugging merge.
139 139 """
140 140 for t in self.merge_regions():
141 141 what = t[0]
142 142 if what == 'unchanged':
143 143 for i in range(t[1], t[2]):
144 144 yield 'u | ' + self.base[i]
145 elif what == 'a' or what == 'same':
145 elif what in ('a', 'same'):
146 146 for i in range(t[1], t[2]):
147 147 yield what[0] + ' | ' + self.a[i]
148 148 elif what == 'b':
149 149 for i in range(t[1], t[2]):
150 150 yield 'b | ' + self.b[i]
151 151 elif what == 'conflict':
152 152 yield '<<<<\n'
153 153 for i in range(t[3], t[4]):
154 154 yield 'A | ' + self.a[i]
155 155 yield '----\n'
156 156 for i in range(t[5], t[6]):
157 157 yield 'B | ' + self.b[i]
158 158 yield '>>>>\n'
159 159 else:
160 160 raise ValueError(what)
161 161
162 162 def merge_groups(self):
163 163 """Yield sequence of line groups. Each one is a tuple:
164 164
165 165 'unchanged', lines
166 166 Lines unchanged from base
167 167
168 168 'a', lines
169 169 Lines taken from a
170 170
171 171 'same', lines
172 172 Lines taken from a (and equal to b)
173 173
174 174 'b', lines
175 175 Lines taken from b
176 176
177 177 'conflict', base_lines, a_lines, b_lines
178 178 Lines from base were changed to either a or b and conflict.
179 179 """
180 180 for t in self.merge_regions():
181 181 what = t[0]
182 182 if what == 'unchanged':
183 183 yield what, self.base[t[1]:t[2]]
184 elif what == 'a' or what == 'same':
184 elif what in ('a', 'same'):
185 185 yield what, self.a[t[1]:t[2]]
186 186 elif what == 'b':
187 187 yield what, self.b[t[1]:t[2]]
188 188 elif what == 'conflict':
189 189 yield (what,
190 190 self.base[t[1]:t[2]],
191 191 self.a[t[3]:t[4]],
192 192 self.b[t[5]:t[6]])
193 193 else:
194 194 raise ValueError(what)
195 195
196 196 def merge_regions(self):
197 197 """Return sequences of matching and conflicting regions.
198 198
199 199 This returns tuples, where the first value says what kind we
200 200 have:
201 201
202 202 'unchanged', start, end
203 203 Take a region of base[start:end]
204 204
205 205 'same', astart, aend
206 206 b and a are different from base but give the same result
207 207
208 208 'a', start, end
209 209 Non-clashing insertion from a[start:end]
210 210
211 211 Method is as follows:
212 212
213 213 The two sequences align only on regions which match the base
214 214 and both descendents. These are found by doing a two-way diff
215 215 of each one against the base, and then finding the
216 216 intersections between those regions. These "sync regions"
217 217 are by definition unchanged in both and easily dealt with.
218 218
219 219 The regions in between can be in any of three cases:
220 220 conflicted, or changed on only one side.
221 221 """
222 222
223 223 # section a[0:ia] has been disposed of, etc
224 224 iz = ia = ib = 0
225 225
226 226 for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
227 227 #print 'match base [%d:%d]' % (zmatch, zend)
228 228
229 229 matchlen = zend - zmatch
230 230 assert matchlen >= 0
231 231 assert matchlen == (aend - amatch)
232 232 assert matchlen == (bend - bmatch)
233 233
234 234 len_a = amatch - ia
235 235 len_b = bmatch - ib
236 236 len_base = zmatch - iz
237 237 assert len_a >= 0
238 238 assert len_b >= 0
239 239 assert len_base >= 0
240 240
241 241 #print 'unmatched a=%d, b=%d' % (len_a, len_b)
242 242
243 243 if len_a or len_b:
244 244 # try to avoid actually slicing the lists
245 245 equal_a = compare_range(self.a, ia, amatch,
246 246 self.base, iz, zmatch)
247 247 equal_b = compare_range(self.b, ib, bmatch,
248 248 self.base, iz, zmatch)
249 249 same = compare_range(self.a, ia, amatch,
250 250 self.b, ib, bmatch)
251 251
252 252 if same:
253 253 yield 'same', ia, amatch
254 254 elif equal_a and not equal_b:
255 255 yield 'b', ib, bmatch
256 256 elif equal_b and not equal_a:
257 257 yield 'a', ia, amatch
258 258 elif not equal_a and not equal_b:
259 259 yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
260 260 else:
261 261 raise AssertionError("can't handle a=b=base but unmatched")
262 262
263 263 ia = amatch
264 264 ib = bmatch
265 265 iz = zmatch
266 266
267 267 # if the same part of the base was deleted on both sides
268 268 # that's OK, we can just skip it.
269 269
270 270
271 271 if matchlen > 0:
272 272 assert ia == amatch
273 273 assert ib == bmatch
274 274 assert iz == zmatch
275 275
276 276 yield 'unchanged', zmatch, zend
277 277 iz = zend
278 278 ia = aend
279 279 ib = bend
280 280
281 281 def reprocess_merge_regions(self, merge_regions):
282 282 """Where there are conflict regions, remove the agreed lines.
283 283
284 284 Lines where both A and B have made the same changes are
285 285 eliminated.
286 286 """
287 287 for region in merge_regions:
288 288 if region[0] != "conflict":
289 289 yield region
290 290 continue
291 291 type, iz, zmatch, ia, amatch, ib, bmatch = region
292 292 a_region = self.a[ia:amatch]
293 293 b_region = self.b[ib:bmatch]
294 294 matches = mdiff.get_matching_blocks(''.join(a_region),
295 295 ''.join(b_region))
296 296 next_a = ia
297 297 next_b = ib
298 298 for region_ia, region_ib, region_len in matches[:-1]:
299 299 region_ia += ia
300 300 region_ib += ib
301 301 reg = self.mismatch_region(next_a, region_ia, next_b,
302 302 region_ib)
303 303 if reg is not None:
304 304 yield reg
305 305 yield 'same', region_ia, region_len + region_ia
306 306 next_a = region_ia + region_len
307 307 next_b = region_ib + region_len
308 308 reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
309 309 if reg is not None:
310 310 yield reg
311 311
312 312 def mismatch_region(next_a, region_ia, next_b, region_ib):
313 313 if next_a < region_ia or next_b < region_ib:
314 314 return 'conflict', None, None, next_a, region_ia, next_b, region_ib
315 315 mismatch_region = staticmethod(mismatch_region)
316 316
317 317 def find_sync_regions(self):
318 318 """Return a list of sync regions, where both descendents match the base.
319 319
320 320 Generates a list of (base1, base2, a1, a2, b1, b2). There is
321 321 always a zero-length sync region at the end of all the files.
322 322 """
323 323
324 324 ia = ib = 0
325 325 amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
326 326 bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
327 327 len_a = len(amatches)
328 328 len_b = len(bmatches)
329 329
330 330 sl = []
331 331
332 332 while ia < len_a and ib < len_b:
333 333 abase, amatch, alen = amatches[ia]
334 334 bbase, bmatch, blen = bmatches[ib]
335 335
336 336 # there is an unconflicted block at i; how long does it
337 337 # extend? until whichever one ends earlier.
338 338 i = intersect((abase, abase + alen), (bbase, bbase + blen))
339 339 if i:
340 340 intbase = i[0]
341 341 intend = i[1]
342 342 intlen = intend - intbase
343 343
344 344 # found a match of base[i[0], i[1]]; this may be less than
345 345 # the region that matches in either one
346 346 assert intlen <= alen
347 347 assert intlen <= blen
348 348 assert abase <= intbase
349 349 assert bbase <= intbase
350 350
351 351 asub = amatch + (intbase - abase)
352 352 bsub = bmatch + (intbase - bbase)
353 353 aend = asub + intlen
354 354 bend = bsub + intlen
355 355
356 356 assert self.base[intbase:intend] == self.a[asub:aend], \
357 357 (self.base[intbase:intend], self.a[asub:aend])
358 358
359 359 assert self.base[intbase:intend] == self.b[bsub:bend]
360 360
361 361 sl.append((intbase, intend,
362 362 asub, aend,
363 363 bsub, bend))
364 364
365 365 # advance whichever one ends first in the base text
366 366 if (abase + alen) < (bbase + blen):
367 367 ia += 1
368 368 else:
369 369 ib += 1
370 370
371 371 intbase = len(self.base)
372 372 abase = len(self.a)
373 373 bbase = len(self.b)
374 374 sl.append((intbase, intbase, abase, abase, bbase, bbase))
375 375
376 376 return sl
377 377
378 378 def find_unconflicted(self):
379 379 """Return a list of ranges in base that are not conflicted."""
380 380 am = mdiff.get_matching_blocks(self.basetext, self.atext)
381 381 bm = mdiff.get_matching_blocks(self.basetext, self.btext)
382 382
383 383 unc = []
384 384
385 385 while am and bm:
386 386 # there is an unconflicted block at i; how long does it
387 387 # extend? until whichever one ends earlier.
388 388 a1 = am[0][0]
389 389 a2 = a1 + am[0][2]
390 390 b1 = bm[0][0]
391 391 b2 = b1 + bm[0][2]
392 392 i = intersect((a1, a2), (b1, b2))
393 393 if i:
394 394 unc.append(i)
395 395
396 396 if a2 < b2:
397 397 del am[0]
398 398 else:
399 399 del bm[0]
400 400
401 401 return unc
402 402
403 403 def simplemerge(ui, local, base, other, **opts):
404 404 def readfile(filename):
405 405 f = open(filename, "rb")
406 406 text = f.read()
407 407 f.close()
408 408 if util.binary(text):
409 409 msg = _("%s looks like a binary file.") % filename
410 410 if not opts.get('text'):
411 411 raise util.Abort(msg)
412 412 elif not opts.get('quiet'):
413 413 ui.warn(_('warning: %s\n') % msg)
414 414 return text
415 415
416 416 name_a = local
417 417 name_b = other
418 418 labels = opts.get('label', [])
419 419 if labels:
420 420 name_a = labels.pop(0)
421 421 if labels:
422 422 name_b = labels.pop(0)
423 423 if labels:
424 424 raise util.Abort(_("can only specify two labels."))
425 425
426 426 localtext = readfile(local)
427 427 basetext = readfile(base)
428 428 othertext = readfile(other)
429 429
430 430 local = os.path.realpath(local)
431 431 if not opts.get('print'):
432 432 opener = util.opener(os.path.dirname(local))
433 433 out = opener(os.path.basename(local), "w", atomictemp=True)
434 434 else:
435 435 out = sys.stdout
436 436
437 437 reprocess = not opts.get('no_minimal')
438 438
439 439 m3 = Merge3Text(basetext, localtext, othertext)
440 440 for line in m3.merge_lines(name_a=name_a, name_b=name_b,
441 441 reprocess=reprocess):
442 442 out.write(line)
443 443
444 444 if not opts.get('print'):
445 445 out.rename()
446 446
447 447 if m3.conflicts:
448 448 if not opts.get('quiet'):
449 449 ui.warn(_("warning: conflicts during merge.\n"))
450 450 return 1
@@ -1,1448 +1,1448 b''
1 1 # util.py - Mercurial utility functions and platform specfic implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specfic implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from i18n import _
17 17 import error, osutil, encoding
18 18 import errno, re, shutil, sys, tempfile, traceback
19 19 import os, stat, time, calendar, textwrap, unicodedata, signal
20 20 import imp, socket
21 21
22 22 # Python compatibility
23 23
def sha1(s):
    """Return a SHA-1 hash object for s (delegates to _fastsha1)."""
    return _fastsha1(s)

def _fastsha1(s):
    """Resolve the fastest available sha1 implementation on first use.

    Rebinds both module-level names (sha1, _fastsha1) to the resolved
    implementation so subsequent calls skip this probe entirely.
    """
    global _fastsha1, sha1
    if sys.version_info >= (2, 5):
        from hashlib import sha1 as impl
    else:
        from sha import sha as impl
    sha1 = _fastsha1 = impl
    return impl(s)
38 38
import __builtin__

# Compatibility shim: Python 3 has no builtin buffer(); install a
# substitute into builtins when it is missing.
if sys.version_info[0] < 3:
    def fakebuffer(sliceable, offset=0):
        # slicing copies, but matches buffer()'s call signature
        return sliceable[offset:]
else:
    def fakebuffer(sliceable, offset=0):
        # zero-copy view on Python 3
        return memoryview(sliceable)[offset:]
try:
    buffer
except NameError:
    __builtin__.buffer = fakebuffer
51 51
52 52 import subprocess
53 53 closefds = os.name == 'posix'
54 54
def popen2(cmd, env=None, newlines=False):
    """Run cmd through the shell; return its (stdin, stdout) pipe pair.

    Setting bufsize to -1 lets the system decide the buffer size.  The
    default for bufsize is 0, meaning unbuffered, which performs poorly
    on Mac OS X: http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
65 65
def popen3(cmd, env=None, newlines=False):
    """Run cmd through the shell; return (stdin, stdout, stderr) pipes."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr
74 74
def version():
    """Return version information if available."""
    try:
        # the __version__ module is generated at build time
        import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
82 82
# used by parsedate
# strptime() formats tried in order when parsing a user-supplied date
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# superset of the above with coarser (more ambiguous) formats layered on
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
117 117
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    cache = {}
    # prefer __code__ (available on Python 2.6+ and 3.x); fall back to the
    # legacy func_code attribute on older interpreters
    code = getattr(func, '__code__', None) or func.func_code
    if code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
136 136
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = []   # keys ordered least- to most-recently used
    # prefer __code__ (available on Python 2.6+ and 3.x); fall back to the
    # legacy func_code attribute on older interpreters
    code = getattr(func, '__code__', None) or func.func_code
    if code.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.pop(0)]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
163 163
class propertycache(object):
    """Descriptor computing a value once and caching it on the instance.

    After the first access the result is stored under the same attribute
    name, shadowing this (non-data) descriptor for subsequent lookups.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        setattr(obj, self.name, value)
        return value
172 172
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
179 179
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explain_exit(code)))
        # read the result and close the handle (the original leaked it)
        fp = open(outname, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()
    finally:
        # best-effort cleanup of both temp files; only filesystem errors
        # are ignored (the original bare excepts swallowed everything)
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
213 213
# map of filter-spec prefixes to implementations; specs without a known
# prefix are treated as plain shell pipes (see filter below)
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on the command prefix; the prefix itself is stripped
    # before handing the command to the implementation
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
225 225
def binary(s):
    """return true if a string is binary data"""
    # an empty string is not binary; otherwise a NUL byte marks binary
    if not s:
        return False
    return '\0' in s
229 229
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); returns 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size < min:
            continue
        if min < max:
            # double the threshold, but jump straight to the largest
            # power of two not exceeding what was just accumulated,
            # capping at max
            min = min << 1
            nmin = 1 << log2(size)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        size = 0
    if pending:
        yield ''.join(pending)
260 260
261 261 Abort = error.Abort
262 262
def always(fn):
    """Matcher predicate that accepts every file name."""
    return True
265 265
def never(fn):
    """Matcher predicate that rejects every file name."""
    return False
268 268
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists, so
            # fall back to an absolute path under root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
294 294
def canonpath(root, cwd, myname, auditor=None):
    """return the canonical path of myname, given cwd and root

    The returned path is relative to root, "/"-separated, and has been
    vetted by the auditor.  Raises Abort when myname is not under root.
    """
    if endswithsep(root):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        # interpret relative names against root/cwd
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if auditor is None:
        auditor = path_auditor(root)
    if name != rootsep and name.startswith(rootsep):
        # fast path: name is textually inside root
        name = name[len(rootsep):]
        auditor(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                auditor(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without meeting `root'
                break
            name = dirname

        raise Abort('%s not under root' % myname)
342 342
343 343 _hgexecutable = None
344 344
def main_is_frozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if hasattr(sys, "frozen"):       # new py2exe
        return True
    if hasattr(sys, "importers"):    # old py2exe
        return True
    return imp.is_frozen("__main__") # tools/freeze
354 354
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    # _hgexecutable is a module-level cache; it is filled lazily on the
    # first call via set_hgexecutable()
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        if hg:
            set_hgexecutable(hg)
        elif main_is_frozen():
            # frozen binaries: the interpreter *is* hg
            set_hgexecutable(sys.executable)
        else:
            exe = find_exe('hg') or os.path.basename(sys.argv[0])
            set_hgexecutable(exe)
    return _hgexecutable
370 370
def set_hgexecutable(path):
    """set location of the 'hg' executable (module-level cache)"""
    global _hgexecutable
    _hgexecutable = path
375 375
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    if os.name == 'nt':
        # quote the whole command line for cmd.exe
        cmd = '"%s"' % cmd
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    # let hooks and child scripts find the running hg binary
    env['HG'] = hgexecutable()
    if out is None:
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # capture combined stdout/stderr and forward every line to `out`
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in proc.stdout:
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS an odd status means success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explain_exit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            # ui-like objects report the error and let us return rc
            onerr.warn(errmsg + '\n')
        except AttributeError:
            # otherwise onerr is an exception class to raise
            raise onerr(errmsg)
    return rc
422 422
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def checked(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # translate to SignatureError only when the TypeError came
            # from the call itself (traceback depth 1), not from code
            # running inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return checked
434 434
def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    # prune any parent directories that are now empty; a non-empty or
    # missing parent simply stops the pruning
    try:
        os.removedirs(os.path.dirname(f))
    except OSError:
        pass
443 443
444 444 def copyfile(src, dest):
445 445 "copy a file, preserving mode and atime/mtime"
446 446 if os.path.islink(src):
447 447 try:
448 448 os.unlink(dest)
449 449 except:
450 450 pass
451 451 os.symlink(os.readlink(src), dest)
452 452 else:
453 453 try:
454 454 shutil.copyfile(src, dest)
455 455 shutil.copystat(src, dest)
456 456 except shutil.Error, inst:
457 457 raise Abort(str(inst))
458 458
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible

    Returns (hardlink, num): whether hardlinking is still viable and the
    number of files processed.
    """

    if hardlink is None:
        # default: hardlink only when src and dst live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # propagate the (possibly downgraded) hardlink flag so one
            # failure disables linking for the rest of the tree
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # linking failed: fall back to plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num
486 486
class path_auditor(object):
    '''ensure that a filesystem path contains no banned components.
    the following properties of a path are checked:

    - under top-level .hg
    - starts at the root of a windows drive
    - contains ".."
    - traverses a symlink (e.g. a/symlink_here/b)
    - inside a nested repository (a callback can be used to approve
      some nested repositories, e.g., subrepositories)
    '''

    def __init__(self, root, callback=None):
        # caches of already-validated paths/directories so repeated calls
        # do not re-stat the same prefixes
        self.audited = set()
        self.auditeddir = set()
        self.root = root
        self.callback = callback

    def __call__(self, path):
        if path in self.audited:
            return
        normpath = os.path.normcase(path)
        parts = splitpath(normpath)
        # reject absolute/drive paths, a leading .hg component, and ".."
        if (os.path.splitdrive(path)[0]
            or parts[0].lower() in ('.hg', '.hg.', '')
            or os.pardir in parts):
            raise Abort(_("path contains illegal component: %s") % path)
        if '.hg' in path.lower():
            # a .hg component anywhere past the first means the path is
            # inside a nested repository
            lparts = [p.lower() for p in parts]
            for p in '.hg', '.hg.':
                if p in lparts[1:]:
                    pos = lparts.index(p)
                    base = os.path.join(*parts[:pos])
                    raise Abort(_('path %r is inside repo %r') % (path, base))
        def check(prefix):
            # lstat one directory prefix, rejecting symlinks and nested repos
            curpath = os.path.join(self.root, prefix)
            try:
                st = os.lstat(curpath)
            except OSError, err:
                # EINVAL can be raised as invalid path syntax under win32.
                # They must be ignored for patterns can be checked too.
                if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
                    raise
            else:
                if stat.S_ISLNK(st.st_mode):
                    raise Abort(_('path %r traverses symbolic link %r') %
                                (path, prefix))
                elif (stat.S_ISDIR(st.st_mode) and
                      os.path.isdir(os.path.join(curpath, '.hg'))):
                    if not self.callback or not self.callback(curpath):
                        raise Abort(_('path %r is inside repo %r') %
                                    (path, prefix))
        parts.pop()
        prefixes = []
        # walk the directory prefixes from longest to shortest, stopping
        # at the first one already known to be good
        while parts:
            prefix = os.sep.join(parts)
            if prefix in self.auditeddir:
                break
            check(prefix)
            prefixes.append(prefix)
            parts.pop()

        self.audited.add(path)
        # only add prefixes to the cache after checking everything: we don't
        # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
        self.auditeddir.update(prefixes)
553 553
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    st = os.lstat(pathname)
    return st.st_nlink
557 557
# hardlink support: alias os.link where available, otherwise install a
# stub that always fails
if hasattr(os, 'link'):
    os_link = os.link
else:
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))
563 563
def lookup_reg(key, name=None, scope=None):
    """Registry lookup stub that always returns None.

    Presumably replaced by a platform-specific implementation via the
    wildcard import below (see the os.name check) — TODO confirm.
    """
    return None
566 566
def hidewindow():
    """Hide current shell window.

    Used to hide the window opened when starting asynchronous
    child process under Windows, unneeded on other systems.
    """
    return None
574 574
575 575 if os.name == 'nt':
576 576 from windows import *
577 577 else:
578 578 from posix import *
579 579
def makelock(info, pathname):
    """create a lock at pathname holding `info`, preferring a symlink.

    Falls back to an exclusively-created regular file containing `info`
    when symlinks are unavailable.
    """
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            # the lock already exists: let the caller see the failure
            raise
    except AttributeError: # no symlink in os
        pass

    # O_EXCL makes the fallback creation atomic as well
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
592 592
def readlock(pathname):
    """return the info stored in a lock: the symlink target, or the file
    contents for file-based locks (see makelock)."""
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: pathname is a regular file, not a symlink;
        # ENOSYS: platform has no readlink support
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    return posixfile(pathname).read()
602 602
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no fileno (e.g. a wrapped stream): fall back to stat by name
        return os.stat(fp.name)
    return os.fstat(fd)
609 609
610 610 # File system features
611 611
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    # stat the case-flipped spelling of the last component
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
    except OSError:
        # the flipped spelling does not exist: the filesystem
        # distinguishes case (narrowed from a bare except that also
        # swallowed KeyboardInterrupt and friends)
        return True
    # same inode under both spellings -> case-insensitive
    return s2 != s1
631 631
# cache of directory listings keyed by normalized directory path
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        if name[l] in (os.sep, os.altsep):
            l = l + 1
        name = name[l:]

    if not os.path.lexists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): the result of this replace() is discarded, so it is a
    # latent no-op — confirm intent before relying on it
    seps.replace('\\','\\\\')
    # alternate matches of non-separator runs and separator runs
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # find the on-disk spelling matching this component case-insensitively
        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)
679 679
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """

    # VFAT on some Linux versions can flip mode but it doesn't persist
    # a FS remount. Frequently we can detect it if files are created
    # with exec bit on.

    try:
        EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
        fh, fn = tempfile.mkstemp(dir=path, prefix='hg-checkexec-')
        try:
            os.close(fh)
            m = os.stat(fn).st_mode & 0777
            # does a fresh file come up with exec set, and does flipping
            # the exec bits actually stick?
            new_file_has_exec = m & EXECFLAGS
            os.chmod(fn, m ^ EXECFLAGS)
            exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
        finally:
            os.unlink(fn)
    except (IOError, OSError):
        # we don't care, the user probably won't be able to commit anyway
        return False
    return not (new_file_has_exec or exec_flags_cannot_flip)
706 706
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy here: symlink creation fails if the name
    # already exists
    name = tempfile.mktemp(dir=path, prefix='hg-checklink-')
    try:
        os.symlink(".", name)
        os.unlink(name)
    except (OSError, AttributeError):
        # OSError: filesystem refuses symlinks; AttributeError: platform
        # has no os.symlink at all
        return False
    return True
718 718
def needbinarypatch():
    """return True if patches should be applied in binary mode by default."""
    is_windows = os.name == 'nt'
    return is_windows
722 722
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # note: deliberately preserves the original's falsy (None/False)
    # return when os.altsep is unset
    return os.altsep and path.endswith(os.altsep)
726 726
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    sep = os.sep
    return path.split(sep)
734 734
def gui():
    '''Are we running in a GUI?'''
    # Windows and classic Mac always count as GUI; elsewhere fall back
    # to the X11 DISPLAY variable (string or None, both usable as truth)
    if os.name in ('nt', 'mac'):
        return True
    return os.environ.get("DISPLAY")
738 738
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    try:
        st_mode = os.lstat(name).st_mode & 0777
    except OSError, inst:
        if inst.errno != errno.ENOENT:
            raise
        st_mode = createmode
        if st_mode is None:
            st_mode = ~umask
        st_mode &= 0666
    os.chmod(temp, st_mode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy; the empty temp file is the result
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # best-effort removal of the partial copy, then re-raise
        try: os.unlink(temp)
        except: pass
        raise
    return temp
786 786
class atomictempfile(object):
    """file-like object that atomically updates a file

    All writes will be redirected to a temporary copy of the original
    file. When rename is called, the copy is renamed to the original
    name, making the changes visible.
    """
    def __init__(self, name, mode='w+b', createmode=None):
        # __name is the final destination; all writes go to self.temp
        self.__name = name
        self._fp = None
        self.temp = mktempcopy(name, emptyok=('w' in mode),
                               createmode=createmode)
        self._fp = posixfile(self.temp, mode)

    def __getattr__(self, name):
        # delegate everything else (write, close, ...) to the real file
        return getattr(self._fp, name)

    def rename(self):
        # commit: make the written content visible under the target name
        if not self._fp.closed:
            self._fp.close()
        rename(self.temp, localpath(self.__name))

    def __del__(self):
        # rename() was never called: discard the temporary file
        if not self._fp:
            return
        if not self._fp.closed:
            try:
                os.unlink(self.temp)
            except: pass
            self._fp.close()
817 817
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
        if mode is not None:
            os.chmod(name, mode)
        return
    except OSError, err:
        if err.errno == errno.EEXIST:
            # already there: nothing to do
            return
        if err.errno != errno.ENOENT:
            raise
    # a parent is missing: create it recursively, then retry this level
    parent = os.path.abspath(os.path.dirname(name))
    makedirs(parent, mode)
    makedirs(name, mode)
833 833
class opener(object):
    """Open files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    def __init__(self, base, audit=True):
        self.base = base
        if audit:
            self.auditor = path_auditor(base)
        else:
            # auditing disabled: accept any path
            self.auditor = always
        # mode bits for newly created files (None = leave to umask)
        self.createmode = None

    @propertycache
    def _can_symlink(self):
        # probed once per opener instance (see propertycache)
        return checklink(self.base)

    def _fixfilemode(self, name):
        # apply createmode (write bits only) to a freshly created file
        if self.createmode is None:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False):
        self.auditor(path)
        f = os.path.join(self.base, path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ("r", "rb"):
            # opening for write: handle missing directories, atomic
            # writes, and copy-on-write for hardlinked files
            try:
                nlink = nlinks(f)
            except OSError:
                nlink = 0
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    makedirs(d, self.createmode)
            if atomictemp:
                return atomictempfile(f, mode, self.createmode)
            if nlink > 1:
                # break the hardlink before writing (COW semantics)
                rename(mktempcopy(f), f)
        fp = posixfile(f, mode)
        if nlink == 0:
            # brand-new file: apply the configured creation mode
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.auditor(dst)
        linkname = os.path.join(self.base, dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        dirname = os.path.dirname(linkname)
        if not os.path.exists(dirname):
            makedirs(dirname, self.createmode)

        if self._can_symlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            # no symlink support: store the link target in a regular file
            f = self(dst, "w")
            f.write(src)
            f.close()
            self._fixfilemode(dst)
905 905
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def resize(chunks):
            # re-slice oversized (>1M) chunks into 256k pieces so read()
            # never has to hold huge strings in its queue
            for piece in chunks:
                if len(piece) <= 2**20:
                    yield piece
                else:
                    start = 0
                    total = len(piece)
                    while start < total:
                        yield piece[start:start + 2**18]
                        start += 2**18
        self.iter = resize(in_iter)
        self._queue = []

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        queue = self._queue
        parts = []
        remaining = l
        while remaining > 0:
            if not queue:
                # refill roughly 256k ahead from the source iterator
                want = 2**18
                for piece in self.iter:
                    queue.append(piece)
                    want -= len(piece)
                    if want <= 0:
                        break
                if not queue:
                    break
            piece = queue.pop(0)
            remaining -= len(piece)
            if remaining < 0:
                # took too much: push the surplus back for the next read
                queue.insert(0, piece[remaining:])
                parts.append(piece[:remaining])
            else:
                parts.append(piece)

        return ''.join(parts)
953 953
def filechunkiter(f, size=65536, limit=None):
    """Generate the data in file f in chunks of `size` bytes (default
    65536), stopping after `limit` bytes when a limit is given (default
    is to read everything).  A chunk may be shorter than `size` at end
    of file, or when f is a socket-like object that returns short reads."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            want = size
        else:
            want = min(limit, size)
        s = want and f.read(want)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
974 974
def makedate():
    '''return the current local time as a (unixtime, tzoffset) pair'''
    lt = time.localtime()
    # pick the DST-aware offset when DST is in effect
    if lt[8] == 1 and time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    return time.mktime(lt), tz
982 982
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if "%1" in format or "%2" in format:
        # %1/%2 expand to the signed hour and minute parts of the offset
        if tz > 0:
            sign = "-"
        else:
            sign = "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    return time.strftime(format, time.gmtime(float(t) - tz))
996 996
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    fmt = '%Y-%m-%d'
    return datestr(date, format=fmt)
1000 1000
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    `defaults` maps missing date parts to fallback values; it is only
    read here, so the mutable default argument is safe.
    """
    def timezone(string):
        # recognize a trailing "+HHMM"/"-HHMM", "GMT" or "UTC" token;
        # return its offset in seconds, or None when absent
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz in ("GMT", "UTC"):
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset != None:
        # drop the recognized timezone token before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    for part in defaults:
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1036 1036
def parsedate(date, formats=None, defaults=None):
    """parse a localized date/time string and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    Raises Abort when no format matches or the result is out of range.
    NOTE(review): a caller-supplied `defaults` dict is filled in (mutated)
    with the missing field defaults -- confirm callers expect that.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: the raw internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        # any field the chosen format omits falls back to "now" (dates)
        # or zero (times)
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                if part[0] in "HMS":
                    defaults[part] = "00"
                else:
                    defaults[part] = datestr(now, "%" + part[0])

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1082 1082
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    '-{days}' within the last given number of days

    '{date} to {date}' within the inclusive range
    """

    def lower(date):
        # missing fields default to their minimum (January 1st, 00:00)
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # missing fields default to their maximum: end of day, last day
        # of the month -- probe 31, 30, 29 before falling back to 28,
        # which every month has
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Exception:
                # was a bare except (which also swallowed ^C/SystemExit);
                # parsedate raises Abort/ValueError when the day count is
                # invalid for the month -- try the next smaller one
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()
    if date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1132 1132
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # Successively drop: the mail domain, a leading "Real Name <"
    # prefix, anything after the first space, anything after the
    # first dot.
    user = user.partition('@')[0]
    if '<' in user:
        user = user.split('<', 1)[1]
    user = user.partition(' ')[0]
    user = user.partition('.')[0]
    return user
1148 1148
def email(author):
    '''get email of author.'''
    # take what lies between '<' and '>'; with no brackets this
    # degrades to returning the whole string
    start = author.find('<') + 1
    end = author.find('>')
    return author[start:] if end == -1 else author[start:end]
1155 1155
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) > maxlength:
        # reserve three characters for the trailing dots
        text = text[:maxlength - 3] + "..."
    return text
1162 1162
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, recursively.

    followsym: also follow symbolic links (deduplicated through
    seen_dirs to avoid cycles); only honored when the platform
    provides os.path.samestat.
    recurse: keep descending inside a found repository to locate
    nested repositories.
    '''
    def errhandler(err):
        # only propagate walk errors that concern the root path itself
        if err.filename == path:
            raise err
    if followsym and hasattr(os.path, 'samestat'):
        def _add_dir_if_not_there(dirlst, dirname):
            # record dirname's stat result in dirlst and report whether
            # it was new; samestat compares (st_dev, st_ino) pairs
            match = False
            samestat = os.path.samestat
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # without samestat we cannot detect symlink cycles safely
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        _add_dir_if_not_there(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if _add_dir_if_not_there(seen_dirs, fname):
                    if os.path.islink(fname):
                        # expand a not-yet-seen symlinked tree with a
                        # nested walk sharing the same seen_dirs
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
1209 1209
# lazily-computed cache of the hgrc search path; filled by rcpath()
_rcpath = None
1211 1211
def os_rcpath():
    '''return default os-specific hgrc search path'''
    # system-wide entries precede per-user ones
    path = system_rcpath()
    path.extend(user_rcpath())
    path = [os.path.normpath(f) for f in path]
    return path
1218 1218
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # computed once per process and cached in the module-level _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    # empty entries are skipped, so HGRCPATH='' yields
                    # an empty search path
                    continue
                p = expandpath(p)
                if os.path.isdir(p):
                    # a directory contributes all of its *.rc files
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath
1242 1242
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # Rows are ordered most-specific first: (threshold multiplier,
    # divisor, format).  The first row where nbytes >= divisor *
    # multiplier wins, which keeps roughly three significant digits.
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for threshold, divisor, fmt in units:
        if nbytes >= divisor * threshold:
            return fmt % (nbytes / float(divisor))
    # fall through only for nbytes < 1 (e.g. zero)
    return units[-1][2] % nbytes
1263 1263
def drop_scheme(scheme, path):
    """Strip a leading '<scheme>:' prefix (and '//' authority marker)
    from path; paths with a different scheme pass through unchanged."""
    prefix = scheme + ':'
    if path.startswith(prefix):
        path = path[len(prefix):]
        if path.startswith('//'):
            if scheme == 'file':
                slash = path.find('/', 2)
                if slash == -1:
                    return ''
                # On Windows, absolute paths are rooted at the current drive
                # root. On POSIX they are rooted at the file system root.
                if os.name == 'nt':
                    droot = os.path.splitdrive(os.getcwd())[0] + '/'
                    path = os.path.join(droot, path[slash + 1:])
                else:
                    path = path[slash:]
            else:
                path = path[2:]
    return path
1283 1283
def uirepr(s):
    """repr() a string for display to the user.

    Collapses the doubled backslashes repr() produces so Windows
    paths stay readable.
    """
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')
1287 1287
1288 1288 #### naming convention of below implementation follows 'textwrap' module
1289 1289
class MBTextWrapper(textwrap.TextWrapper):
    """textwrap.TextWrapper that accounts for east-asian wide characters.

    Wide/fullwidth characters are counted as two columns when deciding
    where to cut an over-long word.
    """
    def __init__(self, **kwargs):
        textwrap.TextWrapper.__init__(self, **kwargs)

    def _cutdown(self, str, space_left):
        # Split the byte string `str` so the head fits in space_left
        # display columns; returns (head, rest) re-encoded in the local
        # encoding.  NOTE: the parameter shadows the builtin 'str'.
        l = 0
        # decode so width is measured per character, not per byte
        ucstr = unicode(str, encoding.encoding)
        w = unicodedata.east_asian_width
        for i in xrange(len(ucstr)):
            # 'W'ide, 'F'ullwidth and 'A'mbiguous take two columns
            l += w(ucstr[i]) in 'WFA' and 2 or 1
            if space_left < l:
                return (ucstr[:i].encode(encoding.encoding),
                        ucstr[i:].encode(encoding.encoding))
        return str, ''

    # ----------------------------------------
    # overriding of base class

    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
        # cut the next chunk so its head fits in the remaining space
        space_left = max(width - cur_len, 1)

        if self.break_long_words:
            cut, res = self._cutdown(reversed_chunks[-1], space_left)
            cur_line.append(cut)
            reversed_chunks[-1] = res
        elif not cur_line:
            cur_line.append(reversed_chunks.pop())
1317 1317
1318 1318 #### naming convention of above implementation follows 'textwrap' module
1319 1319
def wrap(line, width=None, initindent='', hangindent=''):
    """Word-wrap line to `width` columns (terminal width - 2 by default).

    initindent prefixes the first output line, hangindent the rest.
    Uses MBTextWrapper so wide (east-asian) characters count as two
    columns.
    """
    if width is None:
        width = termwidth() - 2
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line)
1331 1331
def iterlines(iterator):
    """Yield every text line from an iterable of multi-line chunks."""
    for piece in iterator:
        lines = piece.splitlines()
        for single_line in lines:
            yield single_line
1336 1336
def expandpath(path):
    """Expand environment variables, then a leading ~user, in path."""
    with_vars = os.path.expandvars(path)
    return os.path.expanduser(with_vars)
1339 1339
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if main_is_frozen():
        # frozen build (e.g. py2exe): the running executable *is* hg
        return [sys.executable]
    return gethgcmd()
1350 1350
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and record its pid.  The original stored the
        # whole (pid, status) tuple, which made the `pid in terminated`
        # test below always false; keep only the pid so it works.
        # NOTE(review): os.wait() may reap an unrelated child if the
        # caller has other children -- confirm callers do not.
        terminated.add(os.wait()[0])
    prevhandler = None
    if hasattr(signal, 'SIGCHLD'):
        prevhandler = signal.signal(signal.SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after learning the child is gone, in
            # case the condition became true just before it exited
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1384 1384
# Compatibility shim for Python < 2.5: supply any()/all() when the
# builtins do not exist (the tuple assignment raises NameError there).
try:
    any, all = any, all
except NameError:
    def any(iterable):
        """Return True if any element of iterable is true."""
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        """Return True if every element of iterable is true."""
        for i in iterable:
            if not i:
                return False
        return True
1399 1399
def termwidth():
    """Return the terminal width in columns.

    The COLUMNS environment variable overrides platform detection,
    mirroring the behavior of curses applications.
    """
    if 'COLUMNS' in os.environ:
        try:
            return int(os.environ['COLUMNS'])
        except ValueError:
            # a malformed COLUMNS falls through to platform detection
            pass
    return termwidth_()
1407 1407
def interpolate(prefix, mapping, s, fn=None):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.
    """
    if not fn:
        fn = lambda s: s
    # build one alternation over all mapping keys, anchored on prefix
    matcher = re.compile(r'%s(%s)' % (prefix, '|'.join(mapping.keys())))

    def substitute(match):
        key = match.group()[1:]
        return fn(mapping[key])

    return matcher.sub(substitute, s)
1421 1421
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'") % port)
1438 1438
# canonical spellings of true/false configuration values
_booleans = dict.fromkeys('1 yes true on always'.split(), True)
_booleans.update(dict.fromkeys('0 no false off never'.split(), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
@@ -1,195 +1,195 b''
1 1 # win32.py - utility functions that use win32 API
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Utility functions that use win32 API.
9 9
10 10 Mark Hammond's win32all package allows better functionality on
11 11 Windows. This module overrides definitions in util.py. If not
12 12 available, import of this module will fail, and generic code will be
13 13 used.
14 14 """
15 15
16 16 import win32api
17 17
18 18 import errno, os, sys, pywintypes, win32con, win32file, win32process
19 19 import winerror, win32gui, win32console
20 20 import osutil, encoding
21 21 from win32com.shell import shell, shellcon
22 22
def os_link(src, dst):
    """Create a hard link dst pointing at src; raises OSError(EINVAL)
    when the filesystem cannot make a proper hard link."""
    try:
        win32file.CreateHardLink(dst, src)
    except pywintypes.error:
        raise OSError(errno.EINVAL, 'target implements hardlinks improperly')
    except NotImplementedError: # Another fake error on Win98
        raise OSError(errno.EINVAL, 'Hardlinking not supported')
30 30
def _getfileinfo(pathname):
    """Return the file-information tuple for pathname, as produced by
    win32file.GetFileInformationByHandle.

    Raises OSError(ENOENT) when the file cannot be opened.
    """
    try:
        fh = win32file.CreateFile(pathname,
            win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
            None, win32file.OPEN_EXISTING, 0, None)
    except pywintypes.error:
        raise OSError(errno.ENOENT, 'The system cannot find the file specified')
    try:
        return win32file.GetFileInformationByHandle(fh)
    finally:
        # always release the handle, even when the query fails
        fh.Close()
43 43
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    # index 7 of the file-information tuple is the link count
    links = _getfileinfo(pathname)[7]
    if links < 2:
        # Known to be wrong for most network drives
        dirname = os.path.dirname(pathname)
        if not dirname:
            dirname = '.'
        dt = win32file.GetDriveType(dirname + '\\')
        # GetDriveType: 4 is DRIVE_REMOTE, 1 is DRIVE_NO_ROOT_DIR
        if dt in (4, 1):
            # Fake hardlink to force COW for network drives
            links = 2
    return links
57 57
def samefile(fpath1, fpath2):
    """Returns whether fpath1 and fpath2 refer to the same file. This is only
    guaranteed to work for files, not directories."""
    res1 = _getfileinfo(fpath1)
    res2 = _getfileinfo(fpath2)
    # Index 4 is the volume serial number, and 8 and 9 contain the file ID
    # same volume plus same file ID halves <=> same underlying file
    return res1[4] == res2[4] and res1[8] == res2[8] and res1[9] == res2[9]
65 65
def samedevice(fpath1, fpath2):
    """Returns whether fpath1 and fpath2 are on the same device. This is only
    guaranteed to work for files, not directories."""
    res1 = _getfileinfo(fpath1)
    res2 = _getfileinfo(fpath2)
    # index 4 is the volume serial number; equal serials => same volume
    return res1[4] == res2[4]
72 72
def testpid(pid):
    '''return True if pid is still running or unable to
    determine, False otherwise'''
    try:
        handle = win32api.OpenProcess(
            win32con.PROCESS_QUERY_INFORMATION, False, pid)
        if handle:
            status = win32process.GetExitCodeProcess(handle)
            # STILL_ACTIVE means the process has not exited yet
            return status == win32con.STILL_ACTIVE
    except pywintypes.error, details:
        # ERROR_INVALID_PARAMETER => no such pid => not running;
        # any other failure counts as "unable to determine" => True
        return details[0] != winerror.ERROR_INVALID_PARAMETER
    return True
85 85
def lookup_reg(key, valname=None, scope=None):
    ''' Look up a key/value name in the Windows registry.

    valname: value name. If unspecified, the default value for the key
    is used.
    scope: optionally specify scope for registry lookup, this can be
    a sequence of scopes to look up in order. Default (CURRENT_USER,
    LOCAL_MACHINE).

    Returns the value converted to the local encoding, or None when
    _winreg is unavailable or the value exists in no scope.
    '''
    try:
        from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, \
            QueryValueEx, OpenKey
    except ImportError:
        return None

    if scope is None:
        scope = (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE)
    elif not isinstance(scope, (list, tuple)):
        # a single scope is promoted to a one-element sequence
        scope = (scope,)
    for s in scope:
        try:
            val = QueryValueEx(OpenKey(s, key), valname)[0]
            # never let a Unicode string escape into the wild
            return encoding.tolocal(val.encode('UTF-8'))
        except EnvironmentError:
            pass
112 112
def system_rcpath_win32():
    '''return default os-specific hgrc search path

    Looks, in order, for mercurial.ini next to hg.exe, an hgrc.d
    directory of *.rc files next to hg.exe, and finally paths listed
    in the HKLM\\SOFTWARE\\Mercurial registry value.
    '''
    proc = win32api.GetCurrentProcess()
    try:
        # This will fail on windows < NT
        filename = win32process.GetModuleFileNameEx(proc, 0)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate; fall back to the Win9x-compatible API
        filename = win32api.GetModuleFileName(0)
    # Use mercurial.ini found in directory with hg.exe
    progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini')
    if os.path.isfile(progrc):
        return [progrc]
    # Use hgrc.d found in directory with hg.exe
    progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d')
    if os.path.isdir(progrcd):
        rcpath = []
        for f, kind in osutil.listdir(progrcd):
            if f.endswith('.rc'):
                rcpath.append(os.path.join(progrcd, f))
        return rcpath
    # else look for a system rcpath in the registry
    try:
        value = win32api.RegQueryValue(
            win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Mercurial')
        rcpath = []
        for p in value.split(os.pathsep):
            if p.lower().endswith('mercurial.ini'):
                rcpath.append(p)
            elif os.path.isdir(p):
                for f, kind in osutil.listdir(p):
                    if f.endswith('.rc'):
                        rcpath.append(os.path.join(p, f))
        return rcpath
    except pywintypes.error:
        # registry key missing: no system-wide configuration
        return []
148 148
def user_rcpath_win32():
    '''return os-specific hgrc search path to the user dir'''
    userdir = os.path.expanduser('~')
    # getwindowsversion()[3] == 2 identifies the NT platform family;
    # on older Windows expanduser('~') may fail and return '~' verbatim
    if sys.getwindowsversion()[3] != 2 and userdir == '~':
        # We are on win < nt: fetch the APPDATA directory location and use
        # the parent directory as the user home dir.
        appdir = shell.SHGetPathFromIDList(
            shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA))
        userdir = os.path.dirname(appdir)
    return [os.path.join(userdir, 'mercurial.ini'),
            os.path.join(userdir, '.hgrc')]
160 160
def getuser():
    '''return name of current user'''
    # thin wrapper over the win32 GetUserName API
    return win32api.GetUserName()
164 164
def set_signal_handler_win32():
    """Register a termination handler for console events including
    CTRL+C. python signal handlers do not work well with socket
    operations.
    """
    def handler(event):
        # exit immediately on any console control event
        win32process.ExitProcess(1)
    win32api.SetConsoleCtrlHandler(handler)
173 173
def hidewindow():
    """Hide every top-level window owned by the current process."""
    def callback(*args, **kwargs):
        hwnd, pid = args
        # GetWindowThreadProcessId returns (thread id, process id);
        # hide only windows belonging to our own process
        wpid = win32process.GetWindowThreadProcessId(hwnd)[1]
        if pid == wpid:
            win32gui.ShowWindow(hwnd, win32con.SW_HIDE)

    pid = win32process.GetCurrentProcessId()
    win32gui.EnumWindows(callback, pid)
183 183
def termwidth_():
    """Return the console window width, or 79 when no console exists."""
    try:
        # Query stderr to avoid problems with redirections
        screenbuf = win32console.GetStdHandle(win32console.STD_ERROR_HANDLE)
        try:
            window = screenbuf.GetConsoleScreenBufferInfo()['Window']
            # NOTE(review): Right - Left is one less than the window's
            # column count -- presumably deliberate so output never
            # touches the last column (which would auto-wrap); confirm
            # before "fixing"
            width = window.Right - window.Left
            return width
        finally:
            screenbuf.Detach()
    except pywintypes.error:
        # no usable console (e.g. output fully redirected)
        return 79
General Comments 0
You need to be logged in to leave comments. Login now