##// END OF EJS Templates
Replace os.stat with os.lstat in some places.
Vadim Gelfer -
r2448:b77a2ef6 default
parent child Browse files
Show More
@@ -1,487 +1,487 b''
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 import struct, os
11 11 from node import *
12 12 from i18n import gettext as _
13 13 from demandload import *
14 14 demandload(globals(), "time bisect stat util re errno")
15 15
class dirstate(object):
    """Track the state of files in the working directory.

    Maps each tracked filename to a (state, mode, size, mtime) tuple and
    remembers the two parent changesets of the working directory.
    States are: 'n' normal, 'm' needs merging, 'a' added, 'r' removed.
    The map is read lazily from the "dirstate" file and written back
    when dirty.
    """
    # on-disk record layout: state byte, mode, size, mtime, name length
    format = ">cllll"

    def __init__(self, opener, ui, root):
        # opener: file opener rooted at the repository's .hg directory
        # ui: user-interface object, used for warnings
        # root: absolute path of the working directory
        self.opener = opener
        self.root = root
        self.dirty = 0
        self.ui = ui
        self.map = None          # filename -> (state, mode, size, mtime); None until read
        self.pl = None           # working directory parents (p1, p2); None until read
        self.copies = {}         # copy destination -> copy source
        self.ignorefunc = None   # lazily built matcher from .hgignore patterns
        self.blockignore = False # when True, ignore() reports nothing as ignored

    def wjoin(self, f):
        """Join f onto the working directory root."""
        return os.path.join(self.root, f)

    def getcwd(self):
        """Return the current directory relative to the repository root."""
        cwd = os.getcwd()
        if cwd == self.root: return ''
        # drop the root plus the path separator
        return cwd[len(self.root) + 1:]

    def hgignore(self):
        '''return the contents of .hgignore files as a list of patterns.

        the files parsed for patterns include:
        .hgignore in the repository root
        any additional files specified in the [ui] section of ~/.hgrc

        trailing white space is dropped.
        the escape character is backslash.
        comments start with #.
        empty lines are skipped.

        lines can be of the following formats:

        syntax: regexp # defaults following lines to non-rooted regexps
        syntax: glob   # defaults following lines to non-rooted globs
        re:pattern     # non-rooted regular expression
        glob:pattern   # non-rooted glob
        pattern        # pattern of the current default type'''
        syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
        def parselines(fp):
            # strip comments (a backslash escapes '#') and trailing space
            for line in fp:
                escape = False
                for i in xrange(len(line)):
                    if escape: escape = False
                    elif line[i] == '\\': escape = True
                    elif line[i] == '#': break
                line = line[:i].rstrip()
                if line: yield line
        repoignore = self.wjoin('.hgignore')
        files = [repoignore]
        files.extend(self.ui.hgignorefiles())
        pats = {}
        for f in files:
            try:
                pats[f] = []
                fp = open(f)
                syntax = 'relre:'
                for line in parselines(fp):
                    if line.startswith('syntax:'):
                        # change the default syntax for following lines
                        s = line[7:].strip()
                        try:
                            syntax = syntaxes[s]
                        except KeyError:
                            self.ui.warn(_("%s: ignoring invalid "
                                           "syntax '%s'\n") % (f, s))
                        continue
                    pat = syntax + line
                    for s in syntaxes.values():
                        # an explicit per-line prefix overrides the default
                        if line.startswith(s):
                            pat = line
                            break
                    pats[f].append(pat)
            except IOError, inst:
                # missing repo .hgignore is normal; warn on other files
                if f != repoignore:
                    self.ui.warn(_("skipping unreadable ignore file"
                                   " '%s': %s\n") % (f, inst.strerror))
        return pats

    def ignore(self, fn):
        '''default match function used by dirstate and
        localrepository.  this honours the repository .hgignore file
        and any other files specified in the [ui] section of .hgrc.'''
        if self.blockignore:
            return False
        if not self.ignorefunc:
            # build and memoize the matcher on first use
            ignore = self.hgignore()
            allpats = []
            [allpats.extend(patlist) for patlist in ignore.values()]
            if allpats:
                try:
                    files, self.ignorefunc, anypats = (
                        util.matcher(self.root, inc=allpats, src='.hgignore'))
                except util.Abort:
                    # Re-raise an exception where the src is the right file
                    for f, patlist in ignore.items():
                        files, self.ignorefunc, anypats = (
                            util.matcher(self.root, inc=patlist, src=f))
            else:
                self.ignorefunc = util.never
        return self.ignorefunc(fn)

    def __del__(self):
        # flush any unsaved state on destruction
        if self.dirty:
            self.write()

    def __getitem__(self, key):
        try:
            return self.map[key]
        except TypeError:
            # self.map is still None: load it, then retry
            self.lazyread()
            return self[key]

    def __contains__(self, key):
        self.lazyread()
        return key in self.map

    def parents(self):
        """Return the working directory's parent nodes (p1, p2)."""
        self.lazyread()
        return self.pl

    def markdirty(self):
        """Flag the in-memory state as needing to be written out."""
        if not self.dirty:
            self.dirty = 1

    def setparents(self, p1, p2=nullid):
        """Set the working directory parents."""
        self.lazyread()
        self.markdirty()
        self.pl = p1, p2

    def state(self, key):
        """Return the state char for key, or '?' if untracked."""
        try:
            return self[key][0]
        except KeyError:
            return "?"

    def lazyread(self):
        # load the dirstate file only on first access
        if self.map is None:
            self.read()

    def parse(self, st):
        """Parse the binary dirstate contents in the string st."""
        # first 40 bytes are the two 20-byte parent nodes
        self.pl = [st[:20], st[20: 40]]

        # deref fields so they will be local in loop
        map = self.map
        copies = self.copies
        format = self.format
        unpack = struct.unpack

        pos = 40
        e_size = struct.calcsize(format)

        while pos < len(st):
            newpos = pos + e_size
            e = unpack(format, st[pos:newpos])
            l = e[4]                 # length of the filename that follows
            pos = newpos
            newpos = pos + l
            f = st[pos:newpos]
            if '\0' in f:
                # entry carries copy information: "dest\0source"
                f, c = f.split('\0')
                copies[f] = c
            map[f] = e[:4]
            pos = newpos

    def read(self):
        """Read the dirstate file from disk, tolerating its absence."""
        self.map = {}
        self.pl = [nullid, nullid]
        try:
            st = self.opener("dirstate").read()
            if st:
                self.parse(st)
        except IOError, err:
            # a missing dirstate file simply means an empty state
            if err.errno != errno.ENOENT: raise

    def copy(self, source, dest):
        """Record that dest was copied from source."""
        self.lazyread()
        self.markdirty()
        self.copies[dest] = source

    def copied(self, file):
        """Return the copy source of file, or None."""
        return self.copies.get(file, None)

    def update(self, files, state, **kw):
        ''' current states:
        n  normal
        m  needs merging
        r  marked for removal
        a  marked for addition'''

        if not files: return
        self.lazyread()
        self.markdirty()
        for f in files:
            if state == "r":
                # removed files keep no stat data
                self.map[f] = ('r', 0, 0, 0)
            else:
                # lstat: record the symlink itself, not its target
                s = os.lstat(self.wjoin(f))
                # callers may force size/mtime via keyword overrides
                st_size = kw.get('st_size', s.st_size)
                st_mtime = kw.get('st_mtime', s.st_mtime)
                self.map[f] = (state, s.st_mode, st_size, st_mtime)
            if self.copies.has_key(f):
                del self.copies[f]

    def forget(self, files):
        """Drop files from the dirstate entirely."""
        if not files: return
        self.lazyread()
        self.markdirty()
        for f in files:
            try:
                del self.map[f]
            except KeyError:
                self.ui.warn(_("not in dirstate: %s!\n") % f)
                pass

    def clear(self):
        """Reset to an empty dirstate."""
        self.map = {}
        self.copies = {}
        self.markdirty()

    def rebuild(self, parent, files):
        """Rebuild the dirstate for parent from a (file, mode) list."""
        self.clear()
        # read the current umask without changing it permanently
        umask = os.umask(0)
        os.umask(umask)
        for f, mode in files:
            if mode:
                # executable entry: keep all mode bits allowed by umask
                self.map[f] = ('n', ~umask, -1, 0)
            else:
                # non-executable: mask off the execute bits
                self.map[f] = ('n', ~umask & 0666, -1, 0)
        self.pl = (parent, nullid)
        self.markdirty()

    def write(self):
        """Write the dirstate back to disk if it has changed."""
        if not self.dirty:
            return
        st = self.opener("dirstate", "w", atomic=True)
        st.write("".join(self.pl))
        for f, e in self.map.items():
            c = self.copied(f)
            if c:
                # append copy source after a NUL separator
                f = f + "\0" + c
            e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
            st.write(e + f)
        self.dirty = 0

    def filterfiles(self, files):
        """Return the subset of the dirstate map covered by files.

        Entries in files may be filenames or directory prefixes; '.'
        selects the whole map.
        """
        ret = {}
        unknown = []

        for x in files:
            if x == '.':
                return self.map.copy()
            if x not in self.map:
                unknown.append(x)
            else:
                ret[x] = self.map[x]

        if not unknown:
            return ret

        # treat remaining names as directory prefixes; use bisect on the
        # sorted key list to find entries underneath each prefix
        b = self.map.keys()
        b.sort()
        blen = len(b)

        for x in unknown:
            bs = bisect.bisect(b, x)
            if bs != 0 and b[bs-1] == x:
                ret[x] = self.map[x]
                continue
            while bs < blen:
                s = b[bs]
                if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
                    ret[s] = self.map[s]
                else:
                    break
                bs += 1
        return ret

    def supported_type(self, f, st, verbose=False):
        """Return True if stat result st describes a trackable file.

        Only regular files are supported; optionally warn about others.
        """
        if stat.S_ISREG(st.st_mode):
            return True
        if verbose:
            kind = 'unknown'
            if stat.S_ISCHR(st.st_mode): kind = _('character device')
            elif stat.S_ISBLK(st.st_mode): kind = _('block device')
            elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
            elif stat.S_ISLNK(st.st_mode): kind = _('symbolic link')
            elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
            elif stat.S_ISDIR(st.st_mode): kind = _('directory')
            self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
                util.pathto(self.getcwd(), f),
                kind))
        return False

    def statwalk(self, files=None, match=util.always, dc=None, ignored=False,
                 badmatch=None):
        """Walk files, yielding (src, filename, stat) tuples.

        Wraps walkhelper with a statmatch that honours the ignore rules
        (unless ignored=True) and the caller's match function.
        """
        self.lazyread()

        # walk all files by default
        if not files:
            files = [self.root]
            if not dc:
                dc = self.map.copy()
        elif not dc:
            dc = self.filterfiles(files)

        def statmatch(file_, stat):
            file_ = util.pconvert(file_)
            # files already tracked in dc are never skipped as ignored
            if not ignored and file_ not in dc and self.ignore(file_):
                return False
            return match(file_)

        return self.walkhelper(files=files, statmatch=statmatch, dc=dc,
                               badmatch=badmatch)

    def walk(self, files=None, match=util.always, dc=None, badmatch=None):
        """Like statwalk, but yield only (src, filename)."""
        # filter out the stat
        for src, f, st in self.statwalk(files, match, dc, badmatch=badmatch):
            yield src, f

    # walk recursively through the directory tree, finding all files
    # matched by the statmatch function
    #
    # results are yielded in a tuple (src, filename, st), where src
    # is one of:
    # 'f' the file was found in the directory tree
    # 'm' the file was only in the dirstate and not in the tree
    # and st is the stat result if the file was found in the directory.
    #
    # dc is an optional arg for the current dirstate.  dc is not modified
    # directly by this function, but might be modified by your statmatch call.
    #
    def walkhelper(self, files, statmatch, dc, badmatch=None):
        # recursion free walker, faster than os.walk.
        def findfiles(s):
            work = [s]
            while work:
                top = work.pop()
                names = os.listdir(top)
                names.sort()
                # nd is the top of the repository dir tree
                nd = util.normpath(top[len(self.root) + 1:])
                if nd == '.':
                    nd = ''
                else:
                    # do not recurse into a repo contained in this
                    # one.  use bisect to find .hg directory so speed
                    # is good on big directory.
                    hg = bisect.bisect_left(names, '.hg')
                    if hg < len(names) and names[hg] == '.hg':
                        if os.path.isdir(os.path.join(top, '.hg')):
                            continue
                for f in names:
                    np = util.pconvert(os.path.join(nd, f))
                    if seen(np):
                        continue
                    p = os.path.join(top, f)
                    # don't trip over symlinks
                    st = os.lstat(p)
                    if stat.S_ISDIR(st.st_mode):
                        # recurse only if the directory itself matches
                        ds = os.path.join(nd, f +'/')
                        if statmatch(ds, st):
                            work.append(p)
                        if statmatch(np, st) and np in dc:
                            yield 'm', np, st
                    elif statmatch(np, st):
                        if self.supported_type(np, st):
                            yield 'f', np, st
                        elif np in dc:
                            yield 'm', np, st

        known = {'.hg': 1}
        def seen(fn):
            # record fn; return True only if it was already recorded
            if fn in known: return True
            known[fn] = 1

        # step one, find all files that match our criteria
        files.sort()
        for ff in util.unique(files):
            f = self.wjoin(ff)
            try:
                st = os.lstat(f)
            except OSError, inst:
                # the path does not exist; see if the dirstate knows
                # anything at or below it before complaining
                nf = util.normpath(ff)
                found = False
                for fn in dc:
                    if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
                        found = True
                        break
                if not found:
                    if inst.errno != errno.ENOENT or not badmatch:
                        self.ui.warn('%s: %s\n' % (
                            util.pathto(self.getcwd(), ff),
                            inst.strerror))
                    elif badmatch and badmatch(ff) and statmatch(ff, None):
                        yield 'b', ff, None
                continue
            if stat.S_ISDIR(st.st_mode):
                cmp1 = (lambda x, y: cmp(x[1], y[1]))
                # sort directory results by filename for stable output
                sorted_ = [ x for x in findfiles(f) ]
                sorted_.sort(cmp1)
                for e in sorted_:
                    yield e
            else:
                ff = util.normpath(ff)
                if seen(ff):
                    continue
                # explicitly named files bypass the ignore rules
                self.blockignore = True
                if statmatch(ff, st):
                    if self.supported_type(ff, st, verbose=True):
                        yield 'f', ff, st
                    elif ff in dc:
                        yield 'm', ff, st
                self.blockignore = False

        # step two run through anything left in the dc hash and yield
        # if we haven't already seen it
        ks = dc.keys()
        ks.sort()
        for k in ks:
            if not seen(k) and (statmatch(k, None)):
                yield 'm', k, None

    def changes(self, files=None, match=util.always, show_ignored=None):
        """Compare the working directory against the dirstate.

        Returns a 7-tuple of filename lists:
        (lookup, modified, added, removed, deleted, unknown, ignored),
        where 'lookup' holds files whose contents must be compared to
        decide whether they really changed.
        """
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted = [], []

        for src, fn, st in self.statwalk(files, match, ignored=show_ignored):
            try:
                type_, mode, size, time = self[fn]
            except KeyError:
                # file is not tracked at all
                if show_ignored and self.ignore(fn):
                    ignored.append(fn)
                else:
                    unknown.append(fn)
                continue
            if src == 'm':
                nonexistent = True
                if not st:
                    try:
                        # lstat so a dangling symlink still counts as present
                        st = os.lstat(self.wjoin(fn))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
                        st = None
                # We need to re-check that it is a valid file
                if st and self.supported_type(fn, st):
                    nonexistent = False
                # XXX: what to do with file no longer present in the fs
                # who are not removed in the dirstate ?
                if nonexistent and type_ in "nm":
                    deleted.append(fn)
                    continue
            # check the common case first
            if type_ == 'n':
                if not st:
                    st = os.lstat(self.wjoin(fn))
                if size >= 0 and (size != st.st_size
                                  or (mode ^ st.st_mode) & 0100):
                    modified.append(fn)
                elif time != st.st_mtime:
                    # size/mode agree but mtime differs: needs content check
                    lookup.append(fn)
            elif type_ == 'm':
                modified.append(fn)
            elif type_ == 'a':
                added.append(fn)
            elif type_ == 'r':
                removed.append(fn)

        return (lookup, modified, added, removed, deleted, unknown, ignored)
@@ -1,2145 +1,2145 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "revlog")
16 16
17 17 class localrepository(object):
18 18 capabilities = ()
19 19
    def __del__(self):
        # drop the transaction reference to break the cycle with it
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, create) the repository at path.

        If path is None, search upward from the current directory for a
        .hg directory.  Raises repo.RepoError if no repository is found
        or the named one does not exist.
        """
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("no repo found"))
            path = p
        self.path = os.path.join(path, ".hg")

        if not create and not os.path.isdir(self.path):
            raise repo.RepoError(_("repository %s not found") % path)

        self.root = os.path.abspath(path)
        self.origroot = path
        self.ui = ui.ui(parentui=parentui)
        # opener serves .hg metadata; wopener serves working-dir files
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is fine
            pass

        # determine the revlog version and flags from [revlog] config
        v = self.ui.revlogopts
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.opener, v)
        self.changelog = changelog.changelog(self.opener, v)

        # the changelog might not have the inline index flag
        # on.  If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # caches, populated lazily
        self.tagscache = None
        self.nodetagscache = None
        self.encodepats = None
        self.decodepats = None
        self.transhandle = None

        if create:
            os.mkdir(self.path)
            os.mkdir(self.join("data"))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
82 82
    def hook(self, name, throw=False, **args):
        """Run all configured hooks for event `name`.

        Hooks come from the [hooks] config section.  Entries starting
        with "python:" are called in-process; others run as shell
        commands with the args exported as HG_* environment variables.
        Returns the combined (or-ed) failure status of all hooks; if
        throw is True, a failing hook raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                raise util.Abort(_('%s hook is invalid '
                                   '(import of "%s" failed)') %
                                 (hname, modname))
            try:
                # walk the dotted path to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0] % exc.args[1:]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run an external hook command with HG_* environment vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # hooks named "name" or "name.suffix" all fire for this event
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
159 159
160 160 def tags(self):
161 161 '''return a mapping of tag to node'''
162 162 if not self.tagscache:
163 163 self.tagscache = {}
164 164
165 165 def parsetag(line, context):
166 166 if not line:
167 167 return
168 168 s = l.split(" ", 1)
169 169 if len(s) != 2:
170 170 self.ui.warn(_("%s: cannot parse entry\n") % context)
171 171 return
172 172 node, key = s
173 173 key = key.strip()
174 174 try:
175 175 bin_n = bin(node)
176 176 except TypeError:
177 177 self.ui.warn(_("%s: node '%s' is not well formed\n") %
178 178 (context, node))
179 179 return
180 180 if bin_n not in self.changelog.nodemap:
181 181 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
182 182 (context, key))
183 183 return
184 184 self.tagscache[key] = bin_n
185 185
186 186 # read the tags file from each head, ending with the tip,
187 187 # and add each tag found to the map, with "newer" ones
188 188 # taking precedence
189 189 heads = self.heads()
190 190 heads.reverse()
191 191 fl = self.file(".hgtags")
192 192 for node in heads:
193 193 change = self.changelog.read(node)
194 194 rev = self.changelog.rev(node)
195 195 fn, ff = self.manifest.find(change[0], '.hgtags')
196 196 if fn is None: continue
197 197 count = 0
198 198 for l in fl.read(fn).splitlines():
199 199 count += 1
200 200 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
201 201 (rev, short(node), count))
202 202 try:
203 203 f = self.opener("localtags")
204 204 count = 0
205 205 for l in f:
206 206 count += 1
207 207 parsetag(l, _("localtags, line %d") % count)
208 208 except IOError:
209 209 pass
210 210
211 211 self.tagscache['tip'] = self.changelog.tip()
212 212
213 213 return self.tagscache
214 214
215 215 def tagslist(self):
216 216 '''return a list of tags ordered by revision'''
217 217 l = []
218 218 for t, n in self.tags().items():
219 219 try:
220 220 r = self.changelog.rev(n)
221 221 except:
222 222 r = -2 # sort to the beginning of the list if unknown
223 223 l.append((r, t, n))
224 224 l.sort()
225 225 return [(t, n) for r, t, n in l]
226 226
227 227 def nodetags(self, node):
228 228 '''return the tags associated with a node'''
229 229 if not self.nodetagscache:
230 230 self.nodetagscache = {}
231 231 for t, n in self.tags().items():
232 232 self.nodetagscache.setdefault(n, []).append(t)
233 233 return self.nodetagscache.get(node, [])
234 234
235 235 def lookup(self, key):
236 236 try:
237 237 return self.tags()[key]
238 238 except KeyError:
239 239 try:
240 240 return self.changelog.lookup(key)
241 241 except:
242 242 raise repo.RepoError(_("unknown revision '%s'") % key)
243 243
    def dev(self):
        """Return the device number of the .hg directory (lstat so a
        symlinked metadata dir is not followed)."""
        return os.lstat(self.path).st_dev
246 246
247 247 def local(self):
248 248 return True
249 249
    def join(self, f):
        """Return f's path inside the .hg metadata directory."""
        return os.path.join(self.path, f)
252 252
    def wjoin(self, f):
        """Return f's path inside the working directory."""
        return os.path.join(self.root, f)
255 255
    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.opener, f, self.revlogversion)
260 260
    def getcwd(self):
        """Return the current directory relative to the repo root."""
        return self.dirstate.getcwd()
263 263
    def wfile(self, f, mode='r'):
        """Open working-directory file f with the given mode."""
        return self.wopener(f, mode)
266 266
    def wread(self, filename):
        """Read a working-directory file, applying [encode] filters.

        The first matching pattern's command filters the data; at most
        one filter is applied.
        """
        if self.encodepats == None:
            # compile the [encode] patterns once and cache them
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
284 284
    def wwrite(self, filename, data, fd=None):
        """Write data to a working-directory file, applying [decode]
        filters.  If fd is given, write to it instead of opening the
        file by name."""
        if self.decodepats == None:
            # compile the [decode] patterns once and cache them
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
302 302
    def transaction(self):
        """Return a new transaction, nesting inside a running one.

        Saves the current dirstate to journal.dirstate so rollback can
        restore it.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            # nested transaction: reuse the outer one
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.opener,
                                     self.join("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
320 320
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True if a journal was found and rolled back.
        """
        l = self.lock()
        if os.path.exists(self.join("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.opener, self.join("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
331 331
    def rollback(self, wlock=None):
        """Undo the last committed transaction using the undo journal."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.join("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.opener, self.join("undo"))
            # restore the dirstate saved alongside the undo journal
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
344 344
    def wreload(self):
        """Re-read the dirstate from disk."""
        self.dirstate.read()
347 347
    def reload(self):
        """Re-read changelog and manifest; invalidate the tag caches."""
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
353 353
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the named lock, optionally waiting for a holder.

        If wait is false, lock.LockHeld propagates immediately;
        otherwise we retry with a timeout ([ui] timeout, default 600s).
        acquirefn, if given, runs after the lock is obtained.
        """
        try:
            l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(self.join(lockname),
                          int(self.ui.config("ui", "timeout") or 600),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
370 370
    def lock(self, wait=1):
        """Acquire the repository (store) lock."""
        return self.do_lock("lock", wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
374 374
    def wlock(self, wait=1):
        """Acquire the working directory lock; flush the dirstate on
        release and reload it on acquisition."""
        return self.do_lock("wlock", wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
379 379
    def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
        "determine whether a new filenode is needed"
        fp1 = manifest1.get(filename, nullid)
        fp2 = manifest2.get(filename, nullid)

        if fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = filelog.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and text == filelog.read(fp1):
            return (fp1, None, None)

        # a new filenode is needed with parents (fp1, fp2)
        return (None, fp1, fp2)
398 398
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
        """Commit the given files directly, bypassing the usual status
        checks (used by import/debug commands).

        The dirstate is only updated when p1 is the current working
        directory parent.
        """
        orig_parent = self.dirstate.parents()[0] or nullid
        p1 = p1 or self.dirstate.parents()[0] or nullid
        p2 = p2 or self.dirstate.parents()[1] or nullid
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])
        changed = []

        # only track the commit in the dirstate if we commit on top of
        # the working directory's first parent
        if orig_parent == p1:
            update_dirstate = 1
        else:
            update_dirstate = 0

        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        tr = self.transaction()
        mm = m1.copy()
        mfm = mf1.copy()
        linkrev = self.changelog.count()
        for f in files:
            try:
                t = self.wread(f)
                tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
                r = self.file(f)
                mfm[f] = tm

                (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the existing filenode
                    mm[f] = entry
                    continue

                mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
                changed.append(f)
                if update_dirstate:
                    self.dirstate.update([f], "n")
            except IOError:
                # file is gone from the working directory: drop it
                try:
                    del mm[f]
                    del mfm[f]
                    if update_dirstate:
                        self.dirstate.forget([f])
                except:
                    # deleted from p2?
                    pass

        mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
        user = user or self.ui.username()
        n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
        tr.close()
        if update_dirstate:
            self.dirstate.setparents(n, nullid)
454 454
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False):
        """Commit working directory changes to the repository.

        If files is given, commit exactly those; otherwise commit what
        self.changes() reports.  Returns the new changeset node, or
        None if nothing was committed (or the commit text was empty).
        Runs the precommit/pretxncommit/commit hooks.
        """
        commit = []
        remove = []
        changed = []

        if files:
            for f in files:
                s = self.dirstate.state(f)
                if s in 'nmai':
                    commit.append(f)
                elif s == 'r':
                    remove.append(f)
                else:
                    self.ui.warn(_("%s not tracked!\n") % f)
        else:
            modified, added, removed, deleted, unknown = self.changes(match=match)
            commit = modified + added
            remove = removed

        p1, p2 = self.dirstate.parents()
        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0])
        mf1 = self.manifest.readflags(c1[0])
        m2 = self.manifest.read(c2[0])

        # an uncommitted merge (p2 set) may still need committing even
        # with no file changes
        if not commit and not remove and not force and p2 == nullid:
            self.ui.status(_("nothing changed\n"))
            return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        for f in commit:
            self.ui.note(f + "\n")
            try:
                mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
                t = self.wread(f)
            except IOError:
                self.ui.warn(_("trouble committing %s!\n") % f)
                raise

            r = self.file(f)

            meta = {}
            cp = self.dirstate.copied(f)
            if cp:
                # record the copy source and its filenode in file metadata
                meta["copy"] = cp
                meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
                self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
                fp1, fp2 = nullid, nullid
            else:
                entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
                if entry:
                    # unchanged from a parent: reuse the existing filenode
                    new[f] = entry
                    continue

            new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
            # remember what we've added so that we can later calculate
            # the files to pull from a set of changesets
            changed.append(f)

        # update manifest
        m1 = m1.copy()
        m1.update(new)
        for f in remove:
            if f in m1:
                del m1[f]
        mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
                               (new, remove))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build a template message and open the user's editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            if p2 != nullid:
                edittext.append("HG: branch merge")
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in remove])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message; an empty one aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        self.dirstate.setparents(n)
        self.dirstate.update(new, "n")
        self.dirstate.forget(remove)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
581 581
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        """Yield (source, filename) pairs for the files of interest.

        With node, walk the manifest of that revision: matched manifest
        files are yielded as ('m', fn).  Requested files absent from the
        manifest are yielded as ('b', fn) when badmatch accepts them,
        otherwise reported with a warning.  Without node, the walk is
        delegated to the dirstate (working directory).

        NOTE: files=[] is a shared mutable default; it is only read
        here, never mutated.
        """
        if node:
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                # drop manifest files from fdict, so that afterwards it
                # holds only the requested files missing from this rev
                fdict.pop(fn, None)
                if match(fn):
                    yield 'm', fn
            for fn in fdict:
                # note: this else pairs with the badmatch test, not the
                # inner match(fn) test
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
599 599
    def changes(self, node1=None, node2=None, files=[], match=util.always,
                wlock=None, show_ignored=None):
        """return changes between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns sorted lists (modified, added, removed, deleted, unknown),
        plus an ignored list when show_ignored is set.  When comparing two
        revisions, deleted/unknown/ignored are empty: those states only
        exist in the working directory.
        """

        def fcmp(fn, mf):
            # full content compare of working-dir file vs. its revision
            # recorded in manifest mf; nonzero means "differs"
            t1 = self.wread(fn)
            t2 = self.file(fn).read(mf.get(fn, nullid))
            return cmp(t1, t2)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = dict(self.manifest.read(change[0]))
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if node1:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                # best effort: proceed without the lock if it is busy
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            lookup, modified, added, removed, deleted, unknown, ignored = (
                self.dirstate.changes(files, match, show_ignored))

            # are we comparing working dir against its parent?
            if not node1:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        elif wlock is not None:
                            # file proved clean; presumably recorded so later
                            # status calls can skip the content compare --
                            # only done while holding the wlock
                            self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self.dirstate.parents()[0])
                for f in lookup + modified + added:
                    mf2[f] = ""
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            deleted, unknown, ignored = [], [], []
            mf2 = mfmatches(node2)

        if node1:
            # flush lists from dirstate before comparing manifests
            modified, added = [], []

            for fn in mf2:
                if mf1.has_key(fn):
                    # mf2[fn] == "" marks a working-dir pseudo-entry, whose
                    # content must be compared by hand
                    if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
                        modified.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored:
            l.sort()
        if show_ignored is None:
            return (modified, added, removed, deleted, unknown)
        else:
            return (modified, added, removed, deleted, unknown, ignored)
682 682
683 683 def add(self, list, wlock=None):
684 684 if not wlock:
685 685 wlock = self.wlock()
686 686 for f in list:
687 687 p = self.wjoin(f)
688 688 if not os.path.exists(p):
689 689 self.ui.warn(_("%s does not exist!\n") % f)
690 690 elif not os.path.isfile(p):
691 691 self.ui.warn(_("%s not added: only files supported currently\n")
692 692 % f)
693 693 elif self.dirstate.state(f) in 'an':
694 694 self.ui.warn(_("%s already tracked!\n") % f)
695 695 else:
696 696 self.dirstate.update([f], "a")
697 697
698 698 def forget(self, list, wlock=None):
699 699 if not wlock:
700 700 wlock = self.wlock()
701 701 for f in list:
702 702 if self.dirstate.state(f) not in 'ai':
703 703 self.ui.warn(_("%s not added!\n") % f)
704 704 else:
705 705 self.dirstate.forget([f])
706 706
707 707 def remove(self, list, unlink=False, wlock=None):
708 708 if unlink:
709 709 for f in list:
710 710 try:
711 711 util.unlink(self.wjoin(f))
712 712 except OSError, inst:
713 713 if inst.errno != errno.ENOENT:
714 714 raise
715 715 if not wlock:
716 716 wlock = self.wlock()
717 717 for f in list:
718 718 p = self.wjoin(f)
719 719 if os.path.exists(p):
720 720 self.ui.warn(_("%s still exists!\n") % f)
721 721 elif self.dirstate.state(f) == 'a':
722 722 self.dirstate.forget([f])
723 723 elif f not in self.dirstate:
724 724 self.ui.warn(_("%s not tracked!\n") % f)
725 725 else:
726 726 self.dirstate.update([f], "r")
727 727
728 728 def undelete(self, list, wlock=None):
729 729 p = self.dirstate.parents()[0]
730 730 mn = self.changelog.read(p)[0]
731 731 mf = self.manifest.readflags(mn)
732 732 m = self.manifest.read(mn)
733 733 if not wlock:
734 734 wlock = self.wlock()
735 735 for f in list:
736 736 if self.dirstate.state(f) not in "r":
737 737 self.ui.warn("%s not removed!\n" % f)
738 738 else:
739 739 t = self.file(f).read(m[f])
740 740 self.wwrite(f, t)
741 741 util.set_exec(self.wjoin(f), mf[f])
742 742 self.dirstate.update([f], "n")
743 743
744 744 def copy(self, source, dest, wlock=None):
745 745 p = self.wjoin(dest)
746 746 if not os.path.exists(p):
747 747 self.ui.warn(_("%s does not exist!\n") % dest)
748 748 elif not os.path.isfile(p):
749 749 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
750 750 else:
751 751 if not wlock:
752 752 wlock = self.wlock()
753 753 if self.dirstate.state(dest) == '?':
754 754 self.dirstate.update([dest], "a")
755 755 self.dirstate.copy(source, dest)
756 756
757 757 def heads(self, start=None):
758 758 heads = self.changelog.heads(start)
759 759 # sort the output in rev descending order
760 760 heads = [(-self.changelog.rev(h), h) for h in heads]
761 761 heads.sort()
762 762 return [n for (r, n) in heads]
763 763
    # branchlookup returns a dict giving a list of branches for
    # each head.  A branch is defined as the tag of a node or
    # the branch of the node's parents.  If a node has multiple
    # branch tags, tags are eliminated if they are visible from other
    # branch tags.
    #
    # So, for this graph:  a->b->c->d->e
    #                       \         /
    #                        aa -----/
    # a has tag 2.6.12
    # d has tag 2.6.13
    # e would have branch tags for 2.6.12 and 2.6.13.  Because the node
    # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
    # from the list.
    #
    # It is possible that more than one head will have the same branch tag.
    # Callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a specific
    # branch).
    #
    # Passing in a specific branch will limit the depth of the search
    # through the parents.  It won't limit the branches returned in the
    # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head to the list of branch tags visible from it.

        See the block comment above for the full semantics.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}       # tag-bearing node -> {node: 1} visibility map
        merges = []         # (second parent, found-so-far) still to walk
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            # 'tip' is not a branch tag
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        # depth limit: stop below the requested branch
                        continue
                seen[n] = 1
                if pp[1] != nullid and n not in seenmerge:
                    # queue the merge's second parent for a later pass
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            def visible(node):
                # memoized set of tag nodes reachable from node
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited.  This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
869 869
870 870 def branches(self, nodes):
871 871 if not nodes:
872 872 nodes = [self.changelog.tip()]
873 873 b = []
874 874 for n in nodes:
875 875 t = n
876 876 while 1:
877 877 p = self.changelog.parents(n)
878 878 if p[1] != nullid or p[0] == nullid:
879 879 b.append((t, n, p[0], p[1]))
880 880 break
881 881 n = p[0]
882 882 return b
883 883
884 884 def between(self, pairs):
885 885 r = []
886 886
887 887 for top, bottom in pairs:
888 888 n, l, i = top, [], 0
889 889 f = 1
890 890
891 891 while n != bottom:
892 892 p = self.changelog.parents(n)[0]
893 893 if i == f:
894 894 l.append(n)
895 895 f = f * 2
896 896 n = p
897 897 i += 1
898 898
899 899 r.append(l)
900 900
901 901 return r
902 902
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in self and remote but whose children do not.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []       # incomplete branch segments to binary-search
        fetch = {}        # earliest unknown node of each missing segment
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)  # nodes already asked about
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue both parents for the next request round
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch the protocol requests, 10 branches at a time
                for p in range(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # adjacent samples: the boundary is pinned down
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            # only the null revision in common: the repos share no history
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.note(_("found new changesets starting at ") +
                     " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1043 1043
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node info supplied: run discovery to fill it in
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
                if heads:
                    if p1 in heads:
                        updated_heads[p1] = True
                    if p2 in heads:
                        updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1091 1091
1092 1092 def pull(self, remote, heads=None, force=False):
1093 1093 l = self.lock()
1094 1094
1095 1095 fetch = self.findincoming(remote, force=force)
1096 1096 if fetch == [nullid]:
1097 1097 self.ui.status(_("requesting all changes\n"))
1098 1098
1099 1099 if not fetch:
1100 1100 self.ui.status(_("no changes found\n"))
1101 1101 return 0
1102 1102
1103 1103 if heads is None:
1104 1104 cg = remote.changegroup(fetch, 'pull')
1105 1105 else:
1106 1106 cg = remote.changegroupsubset(fetch, heads, 'pull')
1107 1107 return self.addchangegroup(cg, 'pull')
1108 1108
1109 1109 def push(self, remote, force=False, revs=None):
1110 1110 # there are two ways to push to remote repo:
1111 1111 #
1112 1112 # addchangegroup assumes local user can lock remote
1113 1113 # repo (local filesystem, old ssh servers).
1114 1114 #
1115 1115 # unbundle assumes local user cannot lock remote repo (new ssh
1116 1116 # servers, http servers).
1117 1117
1118 1118 if 'unbundle' in remote.capabilities:
1119 1119 self.push_unbundle(remote, force, revs)
1120 1120 else:
1121 1121 self.push_addchangegroup(remote, force, revs)
1122 1122
    def prepush(self, remote, force, revs):
        """Decide whether a push may proceed and build its changegroup.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when the push is refused: unsynced remote changes, nothing to
        push, or new remote branches without force.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)
        if not force and inc:
            # the remote has changesets we don't: pushing would race
            self.ui.warn(_("abort: unsynced remote changes!\n"))
            self.ui.status(_("(did you forget to sync?"
                             " use push -f to force)\n"))
            return None, 1

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # FIXME we don't properly detect creation of new heads
            # in the push -r case, assume the user knows what he's doing
            if not revs and len(remote_heads) < len(heads) \
               and remote_heads != [nullid]:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1157 1157
1158 1158 def push_addchangegroup(self, remote, force, revs):
1159 1159 lock = remote.lock()
1160 1160
1161 1161 ret = self.prepush(remote, force, revs)
1162 1162 if ret[0] is not None:
1163 1163 cg, remote_heads = ret
1164 1164 return remote.addchangegroup(cg, 'push')
1165 1165 return ret[1]
1166 1166
1167 1167 def push_unbundle(self, remote, force, revs):
1168 1168 # local repo finds heads on server, finds out what revs it
1169 1169 # must push. once revs transferred, if server finds it has
1170 1170 # different heads (someone else won commit/push race), server
1171 1171 # aborts.
1172 1172
1173 1173 ret = self.prepush(remote, force, revs)
1174 1174 if ret[0] is not None:
1175 1175 cg, remote_heads = ret
1176 1176 if force: remote_heads = ['force']
1177 1177 return remote.unbundle(cg, remote_heads, 'push')
1178 1178 return ret[1]
1179 1179
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer wrapping the generated chunks.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1450 1450
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        basenodes: nodes the recipient is assumed to already have; everything
        between them and the heads is streamed.
        source: opaque tag passed through to the 'preoutgoing'/'outgoing'
        hooks.

        Returns a util.chunkbuffer wrapping a generator that yields the
        changegroup chunks (changelog, then manifest, then one named group
        per changed file, terminated by a close chunk)."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # set of changelog revision numbers being sent, for fast membership
        # tests in gennodelst below
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        def identity(x):
            # trivial lookup function: for the changelog, a node is its own
            # link node
            return x

        def gennodelst(revlog):
            # yield, in revision order, the nodes of `revlog` whose linked
            # changeset is among those being transmitted
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # side-effect callback handed to cl.group(): records every file
            # touched by each changeset streamed
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # build a lookup mapping a node of `revlog` to the changelog
            # node it belongs to
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # materialize so the emptiness test doesn't consume it
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1516 1516
    def addchangegroup(self, source, srctype):
        """add changegroup to repo.
        returns number of heads modified or added + 1.

        source: a stream of changegroup chunks (see changegroup.getchunk /
        changegroup.chunkiter).
        srctype: opaque tag passed to the pre/post hooks.
        """

        def csmap(x):
            # per-changeset progress callback; returns the link revision
            # the following manifest/file revisions will be bound to
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # map a changelog node to its revision number (link revision)
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.opener, self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog revision counts before/after the import
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # stream format: a chunk with the file name, then that
                # file's revision group; an empty chunk ends the stream
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            # always dispose of the append-file wrapper, even on error
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.opener, self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # cor+1 is the first newly added revision
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype)

            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype)

        return newheads - oldheads + 1
1611 1611
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
        """update the working directory to changeset `node`.

        allow: permit a branch merge (otherwise spanning a branch aborts)
        force: override uncommitted-change/clobber safety checks
        choose: optional predicate limiting which files are considered
        moddirstate: if False, leave the dirstate untouched
        forcemerge: skip the outstanding-changes check for merges
        wlock: reuse an already-held working-directory lock
        show_stats: print the updated/merged/removed/unresolved summary

        Returns a false value on success, 1 when aborting a branch span,
        or True if any file merge failed.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            raise util.Abort(_("outstanding uncommitted merges"))

        err = False

        # p1: current working-dir parent; p2: target node; pa: their ancestor
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        # copy m2: entries are deleted from it as files are handled below
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))

        if not forcemerge and not force:
            # refuse to clobber an unknown working file that differs from
            # the incoming version
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        # actions computed by the comparison below:
        # merge: files needing a 3-way merge; get: files to fetch from p2;
        # remove: files to delete from the working dir
        merge = {}
        get = {}
        remove = []

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # same 3-way rule for the exec bit as above
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    # locally changed, remotely deleted: ask the user
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # whatever is left in m2 exists only on the target side
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                # remotely changed, locally deleted: ask the user
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # forced update: fetch the remote side of every would-be merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        xp1 = hex(p1)
        xp2 = hex(p2)
        if p2 == nullid: xxp2 = ''
        else: xxp2 = xp2

        self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    # st_mtime=-1 forces a later status check to re-compare
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # already gone is fine; anything else is worth a warning
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        if show_stats:
            stats = ((len(get), _("updated")),
                     (len(merge) - len(failedmerge), _("merged")),
                     (len(remove), _("removed")),
                     (len(failedmerge), _("unresolved")))
            note = ", ".join([_("%d files %s") % s for s in stats])
            self.ui.status("%s\n" % note)
        if moddirstate:
            if branch_merge:
                if failedmerge:
                    self.ui.status(_("There are unresolved merges,"
                                     " you can redo the full merge using:\n"
                                     "  hg update -C %s\n"
                                     "  hg merge %s\n"
                                     % (self.changelog.rev(p1),
                                        self.changelog.rev(p2))))
                else:
                    self.ui.status(_("(branch merge, don't forget to commit)\n"))
            elif failedmerge:
                self.ui.status(_("There are unresolved merges with"
                                 " locally modified files.\n"))

        self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
        return err
1910 1910
1911 1911 def merge3(self, fn, my, other, p1, p2):
1912 1912 """perform a 3-way merge in the working directory"""
1913 1913
1914 1914 def temp(prefix, node):
1915 1915 pre = "%s~%s." % (os.path.basename(fn), prefix)
1916 1916 (fd, name) = tempfile.mkstemp(prefix=pre)
1917 1917 f = os.fdopen(fd, "wb")
1918 1918 self.wwrite(fn, fl.read(node), f)
1919 1919 f.close()
1920 1920 return name
1921 1921
1922 1922 fl = self.file(fn)
1923 1923 base = fl.ancestor(my, other)
1924 1924 a = self.wjoin(fn)
1925 1925 b = temp("base", base)
1926 1926 c = temp("other", other)
1927 1927
1928 1928 self.ui.note(_("resolving %s\n") % fn)
1929 1929 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1930 1930 (fn, short(my), short(other), short(base)))
1931 1931
1932 1932 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1933 1933 or "hgmerge")
1934 1934 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1935 1935 environ={'HG_FILE': fn,
1936 1936 'HG_MY_NODE': p1,
1937 1937 'HG_OTHER_NODE': p2,
1938 1938 'HG_FILE_MY_NODE': hex(my),
1939 1939 'HG_FILE_OTHER_NODE': hex(other),
1940 1940 'HG_FILE_BASE_NODE': hex(base)})
1941 1941 if r:
1942 1942 self.ui.warn(_("merging %s failed!\n") % fn)
1943 1943
1944 1944 os.unlink(b)
1945 1945 os.unlink(c)
1946 1946 return r
1947 1947
1948 1948 def verify(self):
1949 1949 filelinkrevs = {}
1950 1950 filenodes = {}
1951 1951 changesets = revisions = files = 0
1952 1952 errors = [0]
1953 1953 warnings = [0]
1954 1954 neededmanifests = {}
1955 1955
1956 1956 def err(msg):
1957 1957 self.ui.warn(msg + "\n")
1958 1958 errors[0] += 1
1959 1959
1960 1960 def warn(msg):
1961 1961 self.ui.warn(msg + "\n")
1962 1962 warnings[0] += 1
1963 1963
1964 1964 def checksize(obj, name):
1965 1965 d = obj.checksize()
1966 1966 if d[0]:
1967 1967 err(_("%s data length off by %d bytes") % (name, d[0]))
1968 1968 if d[1]:
1969 1969 err(_("%s index contains %d extra bytes") % (name, d[1]))
1970 1970
1971 1971 def checkversion(obj, name):
1972 1972 if obj.version != revlog.REVLOGV0:
1973 1973 if not revlogv1:
1974 1974 warn(_("warning: `%s' uses revlog format 1") % name)
1975 1975 elif revlogv1:
1976 1976 warn(_("warning: `%s' uses revlog format 0") % name)
1977 1977
1978 1978 revlogv1 = self.revlogversion != revlog.REVLOGV0
1979 1979 if self.ui.verbose or revlogv1 != self.revlogv1:
1980 1980 self.ui.status(_("repository uses revlog format %d\n") %
1981 1981 (revlogv1 and 1 or 0))
1982 1982
1983 1983 seen = {}
1984 1984 self.ui.status(_("checking changesets\n"))
1985 1985 checksize(self.changelog, "changelog")
1986 1986
1987 1987 for i in range(self.changelog.count()):
1988 1988 changesets += 1
1989 1989 n = self.changelog.node(i)
1990 1990 l = self.changelog.linkrev(n)
1991 1991 if l != i:
1992 1992 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1993 1993 if n in seen:
1994 1994 err(_("duplicate changeset at revision %d") % i)
1995 1995 seen[n] = 1
1996 1996
1997 1997 for p in self.changelog.parents(n):
1998 1998 if p not in self.changelog.nodemap:
1999 1999 err(_("changeset %s has unknown parent %s") %
2000 2000 (short(n), short(p)))
2001 2001 try:
2002 2002 changes = self.changelog.read(n)
2003 2003 except KeyboardInterrupt:
2004 2004 self.ui.warn(_("interrupted"))
2005 2005 raise
2006 2006 except Exception, inst:
2007 2007 err(_("unpacking changeset %s: %s") % (short(n), inst))
2008 2008 continue
2009 2009
2010 2010 neededmanifests[changes[0]] = n
2011 2011
2012 2012 for f in changes[3]:
2013 2013 filelinkrevs.setdefault(f, []).append(i)
2014 2014
2015 2015 seen = {}
2016 2016 self.ui.status(_("checking manifests\n"))
2017 2017 checkversion(self.manifest, "manifest")
2018 2018 checksize(self.manifest, "manifest")
2019 2019
2020 2020 for i in range(self.manifest.count()):
2021 2021 n = self.manifest.node(i)
2022 2022 l = self.manifest.linkrev(n)
2023 2023
2024 2024 if l < 0 or l >= self.changelog.count():
2025 2025 err(_("bad manifest link (%d) at revision %d") % (l, i))
2026 2026
2027 2027 if n in neededmanifests:
2028 2028 del neededmanifests[n]
2029 2029
2030 2030 if n in seen:
2031 2031 err(_("duplicate manifest at revision %d") % i)
2032 2032
2033 2033 seen[n] = 1
2034 2034
2035 2035 for p in self.manifest.parents(n):
2036 2036 if p not in self.manifest.nodemap:
2037 2037 err(_("manifest %s has unknown parent %s") %
2038 2038 (short(n), short(p)))
2039 2039
2040 2040 try:
2041 2041 delta = mdiff.patchtext(self.manifest.delta(n))
2042 2042 except KeyboardInterrupt:
2043 2043 self.ui.warn(_("interrupted"))
2044 2044 raise
2045 2045 except Exception, inst:
2046 2046 err(_("unpacking manifest %s: %s") % (short(n), inst))
2047 2047 continue
2048 2048
2049 2049 try:
2050 2050 ff = [ l.split('\0') for l in delta.splitlines() ]
2051 2051 for f, fn in ff:
2052 2052 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2053 2053 except (ValueError, TypeError), inst:
2054 2054 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2055 2055
2056 2056 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2057 2057
2058 2058 for m, c in neededmanifests.items():
2059 2059 err(_("Changeset %s refers to unknown manifest %s") %
2060 2060 (short(m), short(c)))
2061 2061 del neededmanifests
2062 2062
2063 2063 for f in filenodes:
2064 2064 if f not in filelinkrevs:
2065 2065 err(_("file %s in manifest but not in changesets") % f)
2066 2066
2067 2067 for f in filelinkrevs:
2068 2068 if f not in filenodes:
2069 2069 err(_("file %s in changeset but not in manifest") % f)
2070 2070
2071 2071 self.ui.status(_("checking files\n"))
2072 2072 ff = filenodes.keys()
2073 2073 ff.sort()
2074 2074 for f in ff:
2075 2075 if f == "/dev/null":
2076 2076 continue
2077 2077 files += 1
2078 2078 if not f:
2079 2079 err(_("file without name in manifest %s") % short(n))
2080 2080 continue
2081 2081 fl = self.file(f)
2082 2082 checkversion(fl, f)
2083 2083 checksize(fl, f)
2084 2084
2085 2085 nodes = {nullid: 1}
2086 2086 seen = {}
2087 2087 for i in range(fl.count()):
2088 2088 revisions += 1
2089 2089 n = fl.node(i)
2090 2090
2091 2091 if n in seen:
2092 2092 err(_("%s: duplicate revision %d") % (f, i))
2093 2093 if n not in filenodes[f]:
2094 2094 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2095 2095 else:
2096 2096 del filenodes[f][n]
2097 2097
2098 2098 flr = fl.linkrev(n)
2099 2099 if flr not in filelinkrevs.get(f, []):
2100 2100 err(_("%s:%s points to unexpected changeset %d")
2101 2101 % (f, short(n), flr))
2102 2102 else:
2103 2103 filelinkrevs[f].remove(flr)
2104 2104
2105 2105 # verify contents
2106 2106 try:
2107 2107 t = fl.read(n)
2108 2108 except KeyboardInterrupt:
2109 2109 self.ui.warn(_("interrupted"))
2110 2110 raise
2111 2111 except Exception, inst:
2112 2112 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2113 2113
2114 2114 # verify parents
2115 2115 (p1, p2) = fl.parents(n)
2116 2116 if p1 not in nodes:
2117 2117 err(_("file %s:%s unknown parent 1 %s") %
2118 2118 (f, short(n), short(p1)))
2119 2119 if p2 not in nodes:
2120 2120 err(_("file %s:%s unknown parent 2 %s") %
2121 2121 (f, short(n), short(p1)))
2122 2122 nodes[n] = 1
2123 2123
2124 2124 # cross-check
2125 2125 for node in filenodes[f]:
2126 2126 err(_("node %s in manifests not in %s") % (hex(node), f))
2127 2127
2128 2128 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2129 2129 (files, changesets, revisions))
2130 2130
2131 2131 if warnings[0]:
2132 2132 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2133 2133 if errors[0]:
2134 2134 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2135 2135 return 1
2136 2136
# used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callable that promotes the journal files under *base*
    to undo files, for running after a transaction completes."""
    renames = (("journal", "undo"),
               ("journal.dirstate", "undo.dirstate"))
    def promote():
        for src, dst in renames:
            util.rename(os.path.join(base, src), os.path.join(base, dst))
    return promote
2145 2145
@@ -1,900 +1,900 b''
1 1 """
2 2 util.py - Mercurial utility functions and platform specfic implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8
9 9 This contains helper routines that are independent of the SCM core and hide
10 10 platform-specific details from the core.
11 11 """
12 12
13 13 import os, errno
14 14 from i18n import gettext as _
15 15 from demandload import *
16 16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
17 17 demandload(globals(), "threading time")
18 18
19 19 class SignalInterrupt(Exception):
20 20 """Exception raised on SIGTERM and SIGHUP."""
21 21
22 22 def pipefilter(s, cmd):
23 23 '''filter string S through command CMD, returning its output'''
24 24 (pout, pin) = popen2.popen2(cmd, -1, 'b')
25 25 def writer():
26 26 try:
27 27 pin.write(s)
28 28 pin.close()
29 29 except IOError, inst:
30 30 if inst.errno != errno.EPIPE:
31 31 raise
32 32
33 33 # we should use select instead on UNIX, but this will work on most
34 34 # systems, including Windows
35 35 w = threading.Thread(target=writer)
36 36 w.start()
37 37 f = pout.read()
38 38 pout.close()
39 39 w.join()
40 40 return f
41 41
42 42 def tempfilter(s, cmd):
43 43 '''filter string S through a pair of temporary files with CMD.
44 44 CMD is used as a template to create the real command to be run,
45 45 with the strings INFILE and OUTFILE replaced by the real names of
46 46 the temporary files generated.'''
47 47 inname, outname = None, None
48 48 try:
49 49 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
50 50 fp = os.fdopen(infd, 'wb')
51 51 fp.write(s)
52 52 fp.close()
53 53 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
54 54 os.close(outfd)
55 55 cmd = cmd.replace('INFILE', inname)
56 56 cmd = cmd.replace('OUTFILE', outname)
57 57 code = os.system(cmd)
58 58 if code: raise Abort(_("command '%s' failed: %s") %
59 59 (cmd, explain_exit(code)))
60 60 return open(outname, 'rb').read()
61 61 finally:
62 62 try:
63 63 if inname: os.unlink(inname)
64 64 except: pass
65 65 try:
66 66 if outname: os.unlink(outname)
67 67 except: pass
68 68
69 69 filtertable = {
70 70 'tempfile:': tempfilter,
71 71 'pipe:': pipefilter,
72 72 }
73 73
74 74 def filter(s, cmd):
75 75 "filter a string through a command that transforms its input to its output"
76 76 for name, fn in filtertable.iteritems():
77 77 if cmd.startswith(name):
78 78 return fn(s, cmd[len(name):].lstrip())
79 79 return pipefilter(s, cmd)
80 80
81 81 def find_in_path(name, path, default=None):
82 82 '''find name in search path. path can be string (will be split
83 83 with os.pathsep), or iterable thing that returns strings. if name
84 84 found, return path to name. else return default.'''
85 85 if isinstance(path, str):
86 86 path = path.split(os.pathsep)
87 87 for p in path:
88 88 p_name = os.path.join(p, name)
89 89 if os.path.exists(p_name):
90 90 return p_name
91 91 return default
92 92
93 93 def patch(strip, patchname, ui):
94 94 """apply the patch <patchname> to the working directory.
95 95 a list of patched files is returned"""
96 96 patcher = find_in_path('gpatch', os.environ.get('PATH', ''), 'patch')
97 97 fp = os.popen('%s -p%d < "%s"' % (patcher, strip, patchname))
98 98 files = {}
99 99 for line in fp:
100 100 line = line.rstrip()
101 101 ui.status("%s\n" % line)
102 102 if line.startswith('patching file '):
103 103 pf = parse_patch_output(line)
104 104 files.setdefault(pf, 1)
105 105 code = fp.close()
106 106 if code:
107 107 raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
108 108 return files.keys()
109 109
110 110 def binary(s):
111 111 """return true if a string is binary data using diff's heuristic"""
112 112 if s and '\0' in s[:4096]:
113 113 return True
114 114 return False
115 115
116 116 def unique(g):
117 117 """return the uniq elements of iterable g"""
118 118 seen = {}
119 119 for f in g:
120 120 if f not in seen:
121 121 seen[f] = 1
122 122 yield f
123 123
124 124 class Abort(Exception):
125 125 """Raised if a command needs to print an error and exit."""
126 126
127 127 def always(fn): return True
128 128 def never(fn): return False
129 129
130 130 def patkind(name, dflt_pat='glob'):
131 131 """Split a string into an optional pattern kind prefix and the
132 132 actual pattern."""
133 133 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
134 134 if name.startswith(prefix + ':'): return name.split(':', 1)
135 135 return dflt_pat, name
136 136
137 137 def globre(pat, head='^', tail='$'):
138 138 "convert a glob pattern into a regexp"
139 139 i, n = 0, len(pat)
140 140 res = ''
141 141 group = False
142 142 def peek(): return i < n and pat[i]
143 143 while i < n:
144 144 c = pat[i]
145 145 i = i+1
146 146 if c == '*':
147 147 if peek() == '*':
148 148 i += 1
149 149 res += '.*'
150 150 else:
151 151 res += '[^/]*'
152 152 elif c == '?':
153 153 res += '.'
154 154 elif c == '[':
155 155 j = i
156 156 if j < n and pat[j] in '!]':
157 157 j += 1
158 158 while j < n and pat[j] != ']':
159 159 j += 1
160 160 if j >= n:
161 161 res += '\\['
162 162 else:
163 163 stuff = pat[i:j].replace('\\','\\\\')
164 164 i = j + 1
165 165 if stuff[0] == '!':
166 166 stuff = '^' + stuff[1:]
167 167 elif stuff[0] == '^':
168 168 stuff = '\\' + stuff
169 169 res = '%s[%s]' % (res, stuff)
170 170 elif c == '{':
171 171 group = True
172 172 res += '(?:'
173 173 elif c == '}' and group:
174 174 res += ')'
175 175 group = False
176 176 elif c == ',' and group:
177 177 res += '|'
178 178 elif c == '\\':
179 179 p = peek()
180 180 if p:
181 181 i += 1
182 182 res += re.escape(p)
183 183 else:
184 184 res += re.escape(c)
185 185 else:
186 186 res += re.escape(c)
187 187 return head + res + tail
188 188
189 189 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
190 190
191 191 def pathto(n1, n2):
192 192 '''return the relative path from one place to another.
193 193 this returns a path in the form used by the local filesystem, not hg.'''
194 194 if not n1: return localpath(n2)
195 195 a, b = n1.split('/'), n2.split('/')
196 196 a.reverse()
197 197 b.reverse()
198 198 while a and b and a[-1] == b[-1]:
199 199 a.pop()
200 200 b.pop()
201 201 b.reverse()
202 202 return os.sep.join((['..'] * len(a)) + b)
203 203
204 204 def canonpath(root, cwd, myname):
205 205 """return the canonical path of myname, given cwd and root"""
206 206 if root == os.sep:
207 207 rootsep = os.sep
208 208 elif root.endswith(os.sep):
209 209 rootsep = root
210 210 else:
211 211 rootsep = root + os.sep
212 212 name = myname
213 213 if not os.path.isabs(name):
214 214 name = os.path.join(root, cwd, name)
215 215 name = os.path.normpath(name)
216 216 if name != rootsep and name.startswith(rootsep):
217 217 name = name[len(rootsep):]
218 218 audit_path(name)
219 219 return pconvert(name)
220 220 elif name == root:
221 221 return ''
222 222 else:
223 223 # Determine whether `name' is in the hierarchy at or beneath `root',
224 224 # by iterating name=dirname(name) until that causes no change (can't
225 225 # check name == '/', because that doesn't work on windows). For each
226 226 # `name', compare dev/inode numbers. If they match, the list `rel'
227 227 # holds the reversed list of components making up the relative file
228 228 # name we want.
229 229 root_st = os.stat(root)
230 230 rel = []
231 231 while True:
232 232 try:
233 233 name_st = os.stat(name)
234 234 except OSError:
235 235 break
236 236 if samestat(name_st, root_st):
237 237 rel.reverse()
238 238 name = os.path.join(*rel)
239 239 audit_path(name)
240 240 return pconvert(name)
241 241 dirname, basename = os.path.split(name)
242 242 rel.append(basename)
243 243 if dirname == name:
244 244 break
245 245 name = dirname
246 246
247 247 raise Abort('%s not under root' % myname)
248 248
249 249 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
250 250 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
251 251
252 252 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
253 253 if os.name == 'nt':
254 254 dflt_pat = 'glob'
255 255 else:
256 256 dflt_pat = 'relpath'
257 257 return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
258 258
259 259 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
260 260 """build a function to match a set of file patterns
261 261
262 262 arguments:
263 263 canonroot - the canonical root of the tree you're matching against
264 264 cwd - the current working directory, if relevant
265 265 names - patterns to find
266 266 inc - patterns to include
267 267 exc - patterns to exclude
268 268 head - a regex to prepend to patterns to control whether a match is rooted
269 269
270 270 a pattern is one of:
271 271 'glob:<rooted glob>'
272 272 're:<rooted regexp>'
273 273 'path:<rooted path>'
274 274 'relglob:<relative glob>'
275 275 'relpath:<relative path>'
276 276 'relre:<relative regexp>'
277 277 '<rooted path or regexp>'
278 278
279 279 returns:
280 280 a 3-tuple containing
281 281 - list of explicit non-pattern names passed in
282 282 - a bool match(filename) function
283 283 - a bool indicating if any patterns were passed in
284 284
285 285 todo:
286 286 make head regex a rooted bool
287 287 """
288 288
289 289 def contains_glob(name):
290 290 for c in name:
291 291 if c in _globchars: return True
292 292 return False
293 293
294 294 def regex(kind, name, tail):
295 295 '''convert a pattern into a regular expression'''
296 296 if kind == 're':
297 297 return name
298 298 elif kind == 'path':
299 299 return '^' + re.escape(name) + '(?:/|$)'
300 300 elif kind == 'relglob':
301 301 return head + globre(name, '(?:|.*/)', tail)
302 302 elif kind == 'relpath':
303 303 return head + re.escape(name) + tail
304 304 elif kind == 'relre':
305 305 if name.startswith('^'):
306 306 return name
307 307 return '.*' + name
308 308 return head + globre(name, '', tail)
309 309
310 310 def matchfn(pats, tail):
311 311 """build a matching function from a set of patterns"""
312 312 if not pats:
313 313 return
314 314 matches = []
315 315 for k, p in pats:
316 316 try:
317 317 pat = '(?:%s)' % regex(k, p, tail)
318 318 matches.append(re.compile(pat).match)
319 319 except re.error:
320 320 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
321 321 else: raise Abort("invalid pattern (%s): %s" % (k, p))
322 322
323 323 def buildfn(text):
324 324 for m in matches:
325 325 r = m(text)
326 326 if r:
327 327 return r
328 328
329 329 return buildfn
330 330
331 331 def globprefix(pat):
332 332 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
333 333 root = []
334 334 for p in pat.split(os.sep):
335 335 if contains_glob(p): break
336 336 root.append(p)
337 337 return '/'.join(root)
338 338
339 339 pats = []
340 340 files = []
341 341 roots = []
342 342 for kind, name in [patkind(p, dflt_pat) for p in names]:
343 343 if kind in ('glob', 'relpath'):
344 344 name = canonpath(canonroot, cwd, name)
345 345 if name == '':
346 346 kind, name = 'glob', '**'
347 347 if kind in ('glob', 'path', 're'):
348 348 pats.append((kind, name))
349 349 if kind == 'glob':
350 350 root = globprefix(name)
351 351 if root: roots.append(root)
352 352 elif kind == 'relpath':
353 353 files.append((kind, name))
354 354 roots.append(name)
355 355
356 356 patmatch = matchfn(pats, '$') or always
357 357 filematch = matchfn(files, '(?:/|$)') or always
358 358 incmatch = always
359 359 if inc:
360 360 incmatch = matchfn(map(patkind, inc), '(?:/|$)')
361 361 excmatch = lambda fn: False
362 362 if exc:
363 363 excmatch = matchfn(map(patkind, exc), '(?:/|$)')
364 364
365 365 return (roots,
366 366 lambda fn: (incmatch(fn) and not excmatch(fn) and
367 367 (fn.endswith('/') or
368 368 (not pats and not files) or
369 369 (pats and patmatch(fn)) or
370 370 (files and filematch(fn)))),
371 371 (inc or exc or (pats and pats != [('glob', '**')])) and True)
372 372
373 373 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
374 374 '''enhanced shell command execution.
375 375 run with environment maybe modified, maybe in different dir.
376 376
377 377 if command fails and onerr is None, return status. if ui object,
378 378 print error message and return status, else raise onerr object as
379 379 exception.'''
380 380 oldenv = {}
381 381 for k in environ:
382 382 oldenv[k] = os.environ.get(k)
383 383 if cwd is not None:
384 384 oldcwd = os.getcwd()
385 385 try:
386 386 for k, v in environ.iteritems():
387 387 os.environ[k] = str(v)
388 388 if cwd is not None and oldcwd != cwd:
389 389 os.chdir(cwd)
390 390 rc = os.system(cmd)
391 391 if rc and onerr:
392 392 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
393 393 explain_exit(rc)[0])
394 394 if errprefix:
395 395 errmsg = '%s: %s' % (errprefix, errmsg)
396 396 try:
397 397 onerr.warn(errmsg + '\n')
398 398 except AttributeError:
399 399 raise onerr(errmsg)
400 400 return rc
401 401 finally:
402 402 for k, v in oldenv.iteritems():
403 403 if v is None:
404 404 del os.environ[k]
405 405 else:
406 406 os.environ[k] = v
407 407 if cwd is not None and oldcwd != cwd:
408 408 os.chdir(oldcwd)
409 409
410 410 def rename(src, dst):
411 411 """forcibly rename a file"""
412 412 try:
413 413 os.rename(src, dst)
414 414 except OSError, err:
415 415 # on windows, rename to existing file is not allowed, so we
416 416 # must delete destination first. but if file is open, unlink
417 417 # schedules it for delete but does not delete it. rename
418 418 # happens immediately even for open files, so we create
419 419 # temporary file, delete it, rename destination to that name,
420 420 # then delete that. then rename is safe to do.
421 421 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
422 422 os.close(fd)
423 423 os.unlink(temp)
424 424 os.rename(dst, temp)
425 425 os.unlink(temp)
426 426 os.rename(src, dst)
427 427
428 428 def unlink(f):
429 429 """unlink and remove the directory if it is empty"""
430 430 os.unlink(f)
431 431 # try removing directories that might now be empty
432 432 try:
433 433 os.removedirs(os.path.dirname(f))
434 434 except OSError:
435 435 pass
436 436
437 437 def copyfiles(src, dst, hardlink=None):
438 438 """Copy a directory tree using hardlinks if possible"""
439 439
440 440 if hardlink is None:
441 441 hardlink = (os.stat(src).st_dev ==
442 442 os.stat(os.path.dirname(dst)).st_dev)
443 443
444 444 if os.path.isdir(src):
445 445 os.mkdir(dst)
446 446 for name in os.listdir(src):
447 447 srcname = os.path.join(src, name)
448 448 dstname = os.path.join(dst, name)
449 449 copyfiles(srcname, dstname, hardlink)
450 450 else:
451 451 if hardlink:
452 452 try:
453 453 os_link(src, dst)
454 454 except (IOError, OSError):
455 455 hardlink = False
456 456 shutil.copy(src, dst)
457 457 else:
458 458 shutil.copy(src, dst)
459 459
460 460 def audit_path(path):
461 461 """Abort if path contains dangerous components"""
462 462 parts = os.path.normcase(path).split(os.sep)
463 463 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
464 464 or os.pardir in parts):
465 465 raise Abort(_("path contains illegal component: %s\n") % path)
466 466
467 467 def _makelock_file(info, pathname):
468 468 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
469 469 os.write(ld, info)
470 470 os.close(ld)
471 471
472 472 def _readlock_file(pathname):
473 473 return posixfile(pathname).read()
474 474
475 475 def nlinks(pathname):
476 476 """Return number of hardlinks for the given file."""
477 return os.stat(pathname).st_nlink
477 return os.lstat(pathname).st_nlink
478 478
479 479 if hasattr(os, 'link'):
480 480 os_link = os.link
481 481 else:
482 482 def os_link(src, dst):
483 483 raise OSError(0, _("Hardlinks not supported"))
484 484
485 485 def fstat(fp):
486 486 '''stat file object that may not have fileno method.'''
487 487 try:
488 488 return os.fstat(fp.fileno())
489 489 except AttributeError:
490 490 return os.stat(fp.name)
491 491
492 492 posixfile = file
493 493
494 494 def is_win_9x():
495 495 '''return true if run on windows 95, 98 or me.'''
496 496 try:
497 497 return sys.getwindowsversion()[3] == 1
498 498 except AttributeError:
499 499 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
500 500
501 501 # Platform specific variants
502 502 if os.name == 'nt':
503 503 demandload(globals(), "msvcrt")
504 504 nulldev = 'NUL:'
505 505
506 506 class winstdout:
507 507 '''stdout on windows misbehaves if sent through a pipe'''
508 508
509 509 def __init__(self, fp):
510 510 self.fp = fp
511 511
512 512 def __getattr__(self, key):
513 513 return getattr(self.fp, key)
514 514
515 515 def close(self):
516 516 try:
517 517 self.fp.close()
518 518 except: pass
519 519
520 520 def write(self, s):
521 521 try:
522 522 return self.fp.write(s)
523 523 except IOError, inst:
524 524 if inst.errno != 0: raise
525 525 self.close()
526 526 raise IOError(errno.EPIPE, 'Broken pipe')
527 527
528 528 sys.stdout = winstdout(sys.stdout)
529 529
530 530 def system_rcpath():
531 531 try:
532 532 return system_rcpath_win32()
533 533 except:
534 534 return [r'c:\mercurial\mercurial.ini']
535 535
536 536 def os_rcpath():
537 537 '''return default os-specific hgrc search path'''
538 538 path = system_rcpath()
539 539 path.append(user_rcpath())
540 540 userprofile = os.environ.get('USERPROFILE')
541 541 if userprofile:
542 542 path.append(os.path.join(userprofile, 'mercurial.ini'))
543 543 return path
544 544
545 545 def user_rcpath():
546 546 '''return os-specific hgrc search path to the user dir'''
547 547 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
548 548
549 549 def parse_patch_output(output_line):
550 550 """parses the output produced by patch and returns the file name"""
551 551 pf = output_line[14:]
552 552 if pf[0] == '`':
553 553 pf = pf[1:-1] # Remove the quotes
554 554 return pf
555 555
556 556 def testpid(pid):
557 557 '''return False if pid dead, True if running or not known'''
558 558 return True
559 559
560 560 def is_exec(f, last):
561 561 return last
562 562
563 563 def set_exec(f, mode):
564 564 pass
565 565
566 566 def set_binary(fd):
567 567 msvcrt.setmode(fd.fileno(), os.O_BINARY)
568 568
569 569 def pconvert(path):
570 570 return path.replace("\\", "/")
571 571
572 572 def localpath(path):
573 573 return path.replace('/', '\\')
574 574
575 575 def normpath(path):
576 576 return pconvert(os.path.normpath(path))
577 577
578 578 makelock = _makelock_file
579 579 readlock = _readlock_file
580 580
581 581 def samestat(s1, s2):
582 582 return False
583 583
584 584 def explain_exit(code):
585 585 return _("exited with status %d") % code, code
586 586
587 587 try:
588 588 # override functions with win32 versions if possible
589 589 from util_win32 import *
590 590 if not is_win_9x():
591 591 posixfile = posixfile_nt
592 592 except ImportError:
593 593 pass
594 594
595 595 else:
596 596 nulldev = '/dev/null'
597 597
598 598 def rcfiles(path):
599 599 rcs = [os.path.join(path, 'hgrc')]
600 600 rcdir = os.path.join(path, 'hgrc.d')
601 601 try:
602 602 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
603 603 if f.endswith(".rc")])
604 604 except OSError, inst: pass
605 605 return rcs
606 606
607 607 def os_rcpath():
608 608 '''return default os-specific hgrc search path'''
609 609 path = []
610 610 # old mod_python does not set sys.argv
611 611 if len(getattr(sys, 'argv', [])) > 0:
612 612 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
613 613 '/../etc/mercurial'))
614 614 path.extend(rcfiles('/etc/mercurial'))
615 615 path.append(os.path.expanduser('~/.hgrc'))
616 616 path = [os.path.normpath(f) for f in path]
617 617 return path
618 618
619 619 def parse_patch_output(output_line):
620 620 """parses the output produced by patch and returns the file name"""
621 621 pf = output_line[14:]
622 622 if pf.startswith("'") and pf.endswith("'") and pf.find(" ") >= 0:
623 623 pf = pf[1:-1] # Remove the quotes
624 624 return pf
625 625
626 626 def is_exec(f, last):
627 627 """check whether a file is executable"""
628 return (os.stat(f).st_mode & 0100 != 0)
628 return (os.lstat(f).st_mode & 0100 != 0)
629 629
630 630 def set_exec(f, mode):
631 s = os.stat(f).st_mode
631 s = os.lstat(f).st_mode
632 632 if (s & 0100 != 0) == mode:
633 633 return
634 634 if mode:
635 635 # Turn on +x for every +r bit when making a file executable
636 636 # and obey umask.
637 637 umask = os.umask(0)
638 638 os.umask(umask)
639 639 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
640 640 else:
641 641 os.chmod(f, s & 0666)
642 642
643 643 def set_binary(fd):
644 644 pass
645 645
646 646 def pconvert(path):
647 647 return path
648 648
649 649 def localpath(path):
650 650 return path
651 651
652 652 normpath = os.path.normpath
653 653 samestat = os.path.samestat
654 654
655 655 def makelock(info, pathname):
656 656 try:
657 657 os.symlink(info, pathname)
658 658 except OSError, why:
659 659 if why.errno == errno.EEXIST:
660 660 raise
661 661 else:
662 662 _makelock_file(info, pathname)
663 663
664 664 def readlock(pathname):
665 665 try:
666 666 return os.readlink(pathname)
667 667 except OSError, why:
668 668 if why.errno == errno.EINVAL:
669 669 return _readlock_file(pathname)
670 670 else:
671 671 raise
672 672
673 673 def testpid(pid):
674 674 '''return False if pid dead, True if running or not sure'''
675 675 try:
676 676 os.kill(pid, 0)
677 677 return True
678 678 except OSError, inst:
679 679 return inst.errno != errno.ESRCH
680 680
681 681 def explain_exit(code):
682 682 """return a 2-tuple (desc, code) describing a process's status"""
683 683 if os.WIFEXITED(code):
684 684 val = os.WEXITSTATUS(code)
685 685 return _("exited with status %d") % val, val
686 686 elif os.WIFSIGNALED(code):
687 687 val = os.WTERMSIG(code)
688 688 return _("killed by signal %d") % val, val
689 689 elif os.WIFSTOPPED(code):
690 690 val = os.WSTOPSIG(code)
691 691 return _("stopped by signal %d") % val, val
692 692 raise ValueError(_("invalid exit code"))
693 693
694 694 def opener(base, audit=True):
695 695 """
696 696 return a function that opens files relative to base
697 697
698 698 this function is used to hide the details of COW semantics and
699 699 remote file access from higher level code.
700 700 """
701 701 p = base
702 702 audit_p = audit
703 703
704 704 def mktempcopy(name):
705 705 d, fn = os.path.split(name)
706 706 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
707 707 os.close(fd)
708 708 ofp = posixfile(temp, "wb")
709 709 try:
710 710 try:
711 711 ifp = posixfile(name, "rb")
712 712 except IOError, inst:
713 713 if not getattr(inst, 'filename', None):
714 714 inst.filename = name
715 715 raise
716 716 for chunk in filechunkiter(ifp):
717 717 ofp.write(chunk)
718 718 ifp.close()
719 719 ofp.close()
720 720 except:
721 721 try: os.unlink(temp)
722 722 except: pass
723 723 raise
724 724 st = os.lstat(name)
725 725 os.chmod(temp, st.st_mode)
726 726 return temp
727 727
728 728 class atomictempfile(posixfile):
729 729 """the file will only be copied when rename is called"""
730 730 def __init__(self, name, mode):
731 731 self.__name = name
732 732 self.temp = mktempcopy(name)
733 733 posixfile.__init__(self, self.temp, mode)
734 734 def rename(self):
735 735 if not self.closed:
736 736 posixfile.close(self)
737 737 rename(self.temp, localpath(self.__name))
738 738 def __del__(self):
739 739 if not self.closed:
740 740 try:
741 741 os.unlink(self.temp)
742 742 except: pass
743 743 posixfile.close(self)
744 744
745 745 class atomicfile(atomictempfile):
746 746 """the file will only be copied on close"""
747 747 def __init__(self, name, mode):
748 748 atomictempfile.__init__(self, name, mode)
749 749 def close(self):
750 750 self.rename()
751 751 def __del__(self):
752 752 self.rename()
753 753
754 754 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
755 755 if audit_p:
756 756 audit_path(path)
757 757 f = os.path.join(p, path)
758 758
759 759 if not text:
760 760 mode += "b" # for that other OS
761 761
762 762 if mode[0] != "r":
763 763 try:
764 764 nlink = nlinks(f)
765 765 except OSError:
766 766 d = os.path.dirname(f)
767 767 if not os.path.isdir(d):
768 768 os.makedirs(d)
769 769 else:
770 770 if atomic:
771 771 return atomicfile(f, mode)
772 772 elif atomictemp:
773 773 return atomictempfile(f, mode)
774 774 if nlink > 1:
775 775 rename(mktempcopy(f), f)
776 776 return posixfile(f, mode)
777 777
778 778 return o
779 779
780 780 class chunkbuffer(object):
781 781 """Allow arbitrary sized chunks of data to be efficiently read from an
782 782 iterator over chunks of arbitrary size."""
783 783
784 784 def __init__(self, in_iter, targetsize = 2**16):
785 785 """in_iter is the iterator that's iterating over the input chunks.
786 786 targetsize is how big a buffer to try to maintain."""
787 787 self.in_iter = iter(in_iter)
788 788 self.buf = ''
789 789 self.targetsize = int(targetsize)
790 790 if self.targetsize <= 0:
791 791 raise ValueError(_("targetsize must be greater than 0, was %d") %
792 792 targetsize)
793 793 self.iterempty = False
794 794
795 795 def fillbuf(self):
796 796 """Ignore target size; read every chunk from iterator until empty."""
797 797 if not self.iterempty:
798 798 collector = cStringIO.StringIO()
799 799 collector.write(self.buf)
800 800 for ch in self.in_iter:
801 801 collector.write(ch)
802 802 self.buf = collector.getvalue()
803 803 self.iterempty = True
804 804
805 805 def read(self, l):
806 806 """Read L bytes of data from the iterator of chunks of data.
807 807 Returns less than L bytes if the iterator runs dry."""
808 808 if l > len(self.buf) and not self.iterempty:
809 809 # Clamp to a multiple of self.targetsize
810 810 targetsize = self.targetsize * ((l // self.targetsize) + 1)
811 811 collector = cStringIO.StringIO()
812 812 collector.write(self.buf)
813 813 collected = len(self.buf)
814 814 for chunk in self.in_iter:
815 815 collector.write(chunk)
816 816 collected += len(chunk)
817 817 if collected >= targetsize:
818 818 break
819 819 if collected < targetsize:
820 820 self.iterempty = True
821 821 self.buf = collector.getvalue()
822 822 s, self.buf = self.buf[:l], buffer(self.buf, l)
823 823 return s
824 824
825 825 def filechunkiter(f, size = 65536):
826 826 """Create a generator that produces all the data in the file size
827 827 (default 65536) bytes at a time. Chunks may be less than size
828 828 bytes if the chunk is the last chunk in the file, or the file is a
829 829 socket or some other type of file that sometimes reads less data
830 830 than is requested."""
831 831 s = f.read(size)
832 832 while len(s) > 0:
833 833 yield s
834 834 s = f.read(size)
835 835
836 836 def makedate():
837 837 lt = time.localtime()
838 838 if lt[8] == 1 and time.daylight:
839 839 tz = time.altzone
840 840 else:
841 841 tz = time.timezone
842 842 return time.mktime(lt), tz
843 843
844 844 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
845 845 """represent a (unixtime, offset) tuple as a localized time.
846 846 unixtime is seconds since the epoch, and offset is the time zone's
847 847 number of seconds away from UTC. if timezone is false, do not
848 848 append time zone to string."""
849 849 t, tz = date or makedate()
850 850 s = time.strftime(format, time.gmtime(float(t) - tz))
851 851 if timezone:
852 852 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
853 853 return s
854 854
855 855 def shortuser(user):
856 856 """Return a short representation of a user name or email address."""
857 857 f = user.find('@')
858 858 if f >= 0:
859 859 user = user[:f]
860 860 f = user.find('<')
861 861 if f >= 0:
862 862 user = user[f+1:]
863 863 return user
864 864
865 865 def walkrepos(path):
866 866 '''yield every hg repository under path, recursively.'''
867 867 def errhandler(err):
868 868 if err.filename == path:
869 869 raise err
870 870
871 871 for root, dirs, files in os.walk(path, onerror=errhandler):
872 872 for d in dirs:
873 873 if d == '.hg':
874 874 yield root
875 875 dirs[:] = []
876 876 break
877 877
878 878 _rcpath = None
879 879
880 880 def rcpath():
881 881 '''return hgrc search path. if env var HGRCPATH is set, use it.
882 882 for each item in path, if directory, use files ending in .rc,
883 883 else use item.
884 884 make HGRCPATH empty to only look in .hg/hgrc of current repo.
885 885 if no HGRCPATH, use default os-specific path.'''
886 886 global _rcpath
887 887 if _rcpath is None:
888 888 if 'HGRCPATH' in os.environ:
889 889 _rcpath = []
890 890 for p in os.environ['HGRCPATH'].split(os.pathsep):
891 891 if not p: continue
892 892 if os.path.isdir(p):
893 893 for f in os.listdir(p):
894 894 if f.endswith('.rc'):
895 895 _rcpath.append(os.path.join(p, f))
896 896 else:
897 897 _rcpath.append(p)
898 898 else:
899 899 _rcpath = os_rcpath()
900 900 return _rcpath
@@ -1,299 +1,299 b''
1 1 # util_win32.py - utility functions that use win32 API
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of
7 7 # the GNU General Public License, incorporated herein by reference.
8 8
9 9 # Mark Hammond's win32all package allows better functionality on
10 10 # Windows. this module overrides definitions in util.py. if not
11 11 # available, import of this module will fail, and generic code will be
12 12 # used.
13 13
14 14 import win32api
15 15
16 16 from demandload import *
17 17 from i18n import gettext as _
18 18 demandload(globals(), 'errno os pywintypes win32con win32file win32process')
19 19 demandload(globals(), 'cStringIO win32com.shell:shell,shellcon winerror')
20 20
class WinError:
    """Base class translating a win32 error tuple to posix errno values.

    Mixed into IOError/OSError by the subclasses below; winerror_map
    gives the closest errno equivalent for each win32 error code.
    """
    # win32 error code -> errno.  Codes missing from this table fall
    # back to errno 0 at lookup time (see the subclasses).
    winerror_map = {
        winerror.ERROR_ACCESS_DENIED: errno.EACCES,
        winerror.ERROR_ACCOUNT_DISABLED: errno.EACCES,
        winerror.ERROR_ACCOUNT_RESTRICTION: errno.EACCES,
        winerror.ERROR_ALREADY_ASSIGNED: errno.EBUSY,
        winerror.ERROR_ALREADY_EXISTS: errno.EEXIST,
        winerror.ERROR_ARITHMETIC_OVERFLOW: errno.ERANGE,
        winerror.ERROR_BAD_COMMAND: errno.EIO,
        winerror.ERROR_BAD_DEVICE: errno.ENODEV,
        winerror.ERROR_BAD_DRIVER_LEVEL: errno.ENXIO,
        winerror.ERROR_BAD_EXE_FORMAT: errno.ENOEXEC,
        winerror.ERROR_BAD_FORMAT: errno.ENOEXEC,
        winerror.ERROR_BAD_LENGTH: errno.EINVAL,
        winerror.ERROR_BAD_PATHNAME: errno.ENOENT,
        winerror.ERROR_BAD_PIPE: errno.EPIPE,
        winerror.ERROR_BAD_UNIT: errno.ENODEV,
        winerror.ERROR_BAD_USERNAME: errno.EINVAL,
        winerror.ERROR_BROKEN_PIPE: errno.EPIPE,
        winerror.ERROR_BUFFER_OVERFLOW: errno.ENAMETOOLONG,
        winerror.ERROR_BUSY: errno.EBUSY,
        winerror.ERROR_BUSY_DRIVE: errno.EBUSY,
        winerror.ERROR_CALL_NOT_IMPLEMENTED: errno.ENOSYS,
        winerror.ERROR_CANNOT_MAKE: errno.EACCES,
        winerror.ERROR_CANTOPEN: errno.EIO,
        winerror.ERROR_CANTREAD: errno.EIO,
        winerror.ERROR_CANTWRITE: errno.EIO,
        winerror.ERROR_CRC: errno.EIO,
        winerror.ERROR_CURRENT_DIRECTORY: errno.EACCES,
        winerror.ERROR_DEVICE_IN_USE: errno.EBUSY,
        winerror.ERROR_DEV_NOT_EXIST: errno.ENODEV,
        winerror.ERROR_DIRECTORY: errno.EINVAL,
        winerror.ERROR_DIR_NOT_EMPTY: errno.ENOTEMPTY,
        winerror.ERROR_DISK_CHANGE: errno.EIO,
        winerror.ERROR_DISK_FULL: errno.ENOSPC,
        winerror.ERROR_DRIVE_LOCKED: errno.EBUSY,
        winerror.ERROR_ENVVAR_NOT_FOUND: errno.EINVAL,
        winerror.ERROR_EXE_MARKED_INVALID: errno.ENOEXEC,
        winerror.ERROR_FILENAME_EXCED_RANGE: errno.ENAMETOOLONG,
        winerror.ERROR_FILE_EXISTS: errno.EEXIST,
        winerror.ERROR_FILE_INVALID: errno.ENODEV,
        winerror.ERROR_FILE_NOT_FOUND: errno.ENOENT,
        winerror.ERROR_GEN_FAILURE: errno.EIO,
        winerror.ERROR_HANDLE_DISK_FULL: errno.ENOSPC,
        winerror.ERROR_INSUFFICIENT_BUFFER: errno.ENOMEM,
        winerror.ERROR_INVALID_ACCESS: errno.EACCES,
        winerror.ERROR_INVALID_ADDRESS: errno.EFAULT,
        winerror.ERROR_INVALID_BLOCK: errno.EFAULT,
        winerror.ERROR_INVALID_DATA: errno.EINVAL,
        winerror.ERROR_INVALID_DRIVE: errno.ENODEV,
        winerror.ERROR_INVALID_EXE_SIGNATURE: errno.ENOEXEC,
        winerror.ERROR_INVALID_FLAGS: errno.EINVAL,
        winerror.ERROR_INVALID_FUNCTION: errno.ENOSYS,
        winerror.ERROR_INVALID_HANDLE: errno.EBADF,
        winerror.ERROR_INVALID_LOGON_HOURS: errno.EACCES,
        winerror.ERROR_INVALID_NAME: errno.EINVAL,
        winerror.ERROR_INVALID_OWNER: errno.EINVAL,
        winerror.ERROR_INVALID_PARAMETER: errno.EINVAL,
        winerror.ERROR_INVALID_PASSWORD: errno.EPERM,
        winerror.ERROR_INVALID_PRIMARY_GROUP: errno.EINVAL,
        winerror.ERROR_INVALID_SIGNAL_NUMBER: errno.EINVAL,
        winerror.ERROR_INVALID_TARGET_HANDLE: errno.EIO,
        winerror.ERROR_INVALID_WORKSTATION: errno.EACCES,
        winerror.ERROR_IO_DEVICE: errno.EIO,
        winerror.ERROR_IO_INCOMPLETE: errno.EINTR,
        winerror.ERROR_LOCKED: errno.EBUSY,
        winerror.ERROR_LOCK_VIOLATION: errno.EACCES,
        winerror.ERROR_LOGON_FAILURE: errno.EACCES,
        winerror.ERROR_MAPPED_ALIGNMENT: errno.EINVAL,
        winerror.ERROR_META_EXPANSION_TOO_LONG: errno.E2BIG,
        winerror.ERROR_MORE_DATA: errno.EPIPE,
        winerror.ERROR_NEGATIVE_SEEK: errno.ESPIPE,
        winerror.ERROR_NOACCESS: errno.EFAULT,
        winerror.ERROR_NONE_MAPPED: errno.EINVAL,
        winerror.ERROR_NOT_ENOUGH_MEMORY: errno.ENOMEM,
        winerror.ERROR_NOT_READY: errno.EAGAIN,
        winerror.ERROR_NOT_SAME_DEVICE: errno.EXDEV,
        winerror.ERROR_NO_DATA: errno.EPIPE,
        winerror.ERROR_NO_MORE_SEARCH_HANDLES: errno.EIO,
        winerror.ERROR_NO_PROC_SLOTS: errno.EAGAIN,
        winerror.ERROR_NO_SUCH_PRIVILEGE: errno.EACCES,
        winerror.ERROR_OPEN_FAILED: errno.EIO,
        winerror.ERROR_OPEN_FILES: errno.EBUSY,
        winerror.ERROR_OPERATION_ABORTED: errno.EINTR,
        winerror.ERROR_OUTOFMEMORY: errno.ENOMEM,
        winerror.ERROR_PASSWORD_EXPIRED: errno.EACCES,
        winerror.ERROR_PATH_BUSY: errno.EBUSY,
        winerror.ERROR_PATH_NOT_FOUND: errno.ENOENT,
        winerror.ERROR_PIPE_BUSY: errno.EBUSY,
        winerror.ERROR_PIPE_CONNECTED: errno.EPIPE,
        winerror.ERROR_PIPE_LISTENING: errno.EPIPE,
        winerror.ERROR_PIPE_NOT_CONNECTED: errno.EPIPE,
        winerror.ERROR_PRIVILEGE_NOT_HELD: errno.EACCES,
        winerror.ERROR_READ_FAULT: errno.EIO,
        winerror.ERROR_SEEK: errno.EIO,
        winerror.ERROR_SEEK_ON_DEVICE: errno.ESPIPE,
        winerror.ERROR_SHARING_BUFFER_EXCEEDED: errno.ENFILE,
        winerror.ERROR_SHARING_VIOLATION: errno.EACCES,
        winerror.ERROR_STACK_OVERFLOW: errno.ENOMEM,
        winerror.ERROR_SWAPERROR: errno.ENOENT,
        winerror.ERROR_TOO_MANY_MODULES: errno.EMFILE,
        winerror.ERROR_TOO_MANY_OPEN_FILES: errno.EMFILE,
        winerror.ERROR_UNRECOGNIZED_MEDIA: errno.ENXIO,
        winerror.ERROR_UNRECOGNIZED_VOLUME: errno.ENODEV,
        winerror.ERROR_WAIT_NO_CHILDREN: errno.ECHILD,
        winerror.ERROR_WRITE_FAULT: errno.EIO,
        winerror.ERROR_WRITE_PROTECT: errno.EROFS,
        }

    def __init__(self, err):
        # err is the (number, function, message) tuple carried by
        # pywintypes.error exceptions
        self.win_errno, self.win_function, self.win_strerror = err
        # strip a trailing period for consistent message formatting
        if self.win_strerror.endswith('.'):
            self.win_strerror = self.win_strerror[:-1]
134 134
class WinIOError(WinError, IOError):
    """IOError whose errno is translated from a win32 error tuple."""
    def __init__(self, err, filename=None):
        WinError.__init__(self, err)
        # unmapped win32 codes fall back to errno 0
        posix_errno = self.winerror_map.get(self.win_errno, 0)
        IOError.__init__(self, posix_errno, self.win_strerror)
        self.filename = filename
141 141
class WinOSError(WinError, OSError):
    """OSError whose errno is translated from a win32 error tuple."""
    def __init__(self, err):
        WinError.__init__(self, err)
        # unmapped win32 codes fall back to errno 0
        posix_errno = self.winerror_map.get(self.win_errno, 0)
        OSError.__init__(self, posix_errno, self.win_strerror)
147 147
148 148 def os_link(src, dst):
149 149 # NB will only succeed on NTFS
150 150 try:
151 151 win32file.CreateHardLink(dst, src)
152 152 except pywintypes.error, details:
153 153 raise WinOSError(details)
154 154
def nlinks(pathname):
    """Return number of hardlinks for the given file.

    Queries the win32 file-information API; if the file cannot be
    opened (e.g. sharing violation), falls back to os.lstat.
    """
    try:
        fh = win32file.CreateFile(pathname,
            win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
            None, win32file.OPEN_EXISTING, 0, None)
        try:
            # index 7 of the GetFileInformationByHandle tuple is the
            # link count (nNumberOfLinks)
            res = win32file.GetFileInformationByHandle(fh)
        finally:
            # release the handle even if the query raises, so the file
            # is not left open (the original leaked it on that path)
            fh.Close()
        return res[7]
    except pywintypes.error:
        return os.lstat(pathname).st_nlink
166 166
167 167 def testpid(pid):
168 168 '''return True if pid is still running or unable to
169 169 determine, False otherwise'''
170 170 try:
171 171 handle = win32api.OpenProcess(
172 172 win32con.PROCESS_QUERY_INFORMATION, False, pid)
173 173 if handle:
174 174 status = win32process.GetExitCodeProcess(handle)
175 175 return status == win32con.STILL_ACTIVE
176 176 except pywintypes.error, details:
177 177 return details[0] != winerror.ERROR_INVALID_PARAMETER
178 178 return True
179 179
def system_rcpath_win32():
    '''return default os-specific hgrc search path'''
    proc = win32api.GetCurrentProcess()
    try:
        # works on windows NT and later
        exe = win32process.GetModuleFileNameEx(proc, 0)
    except:
        # deliberate best-effort fallback for windows < NT
        exe = win32api.GetModuleFileName(0)
    return [os.path.join(os.path.dirname(exe), 'mercurial.ini')]
189 189
def user_rcpath():
    '''return os-specific hgrc search path to the user dir'''
    home = os.path.expanduser('~')
    if home == '~':
        # win < nt: expanduser failed; derive the home dir from the
        # parent of the APPDATA shell folder
        appdata = shell.SHGetPathFromIDList(
            shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA))
        home = os.path.dirname(appdata)
    return os.path.join(home, 'mercurial.ini')
200 200
class posixfile_nt(object):
    '''file object with posix-like semantics. on windows, normal
    files can not be deleted or renamed if they are open. must open
    with win32file.FILE_SHARE_DELETE. this flag does not exist on
    windows < nt, so do not use this class there.'''

    # tried to use win32file._open_osfhandle to pass fd to os.fdopen,
    # but does not work at all. wrap win32 file api instead.

    def __init__(self, name, mode='rb'):
        # translate a posix-style mode string into win32 access and
        # creation flags
        access = 0
        if 'r' in mode or '+' in mode:
            access |= win32file.GENERIC_READ
        if 'w' in mode or 'a' in mode:
            access |= win32file.GENERIC_WRITE
        if 'r' in mode:
            creation = win32file.OPEN_EXISTING
        elif 'a' in mode:
            creation = win32file.OPEN_ALWAYS
        else:
            creation = win32file.CREATE_ALWAYS
        try:
            # FILE_SHARE_DELETE is the point of this class: it lets
            # other processes delete or rename the file while open
            self.handle = win32file.CreateFile(name,
                                               access,
                                               win32file.FILE_SHARE_READ |
                                               win32file.FILE_SHARE_WRITE |
                                               win32file.FILE_SHARE_DELETE,
                                               None,
                                               creation,
                                               win32file.FILE_ATTRIBUTE_NORMAL,
                                               0)
        except pywintypes.error, err:
            raise WinIOError(err, name)
        self.closed = False
        self.name = name
        self.mode = mode

    def __iter__(self):
        # iterate over lines, keeping line endings (like file objects)
        for line in self.read().splitlines(True):
            yield line

    def read(self, count=-1):
        '''read up to count bytes; a count of -1 means read to EOF'''
        try:
            cs = cStringIO.StringIO()
            while count:
                wincount = int(count)
                if wincount == -1:
                    wincount = 1048576  # read-to-EOF: 1MB chunks
                val, data = win32file.ReadFile(self.handle, wincount)
                if not data: break
                cs.write(data)
                if count != -1:
                    count -= len(data)
            return cs.getvalue()
        except pywintypes.error, err:
            raise WinIOError(err)

    def write(self, data):
        '''write data to the file, looping until all bytes are written'''
        try:
            if 'a' in self.mode:
                # append mode: always position at end before writing
                win32file.SetFilePointer(self.handle, 0, win32file.FILE_END)
            nwrit = 0
            while nwrit < len(data):
                val, nwrit = win32file.WriteFile(self.handle, data)
                data = data[nwrit:]
        except pywintypes.error, err:
            raise WinIOError(err)

    def seek(self, pos, whence=0):
        '''move the file pointer; whence follows posix seek conventions'''
        try:
            win32file.SetFilePointer(self.handle, int(pos), whence)
        except pywintypes.error, err:
            raise WinIOError(err)

    def tell(self):
        '''return the current file pointer position'''
        try:
            # a zero-offset SetFilePointer returns the current position
            return win32file.SetFilePointer(self.handle, 0,
                                            win32file.FILE_CURRENT)
        except pywintypes.error, err:
            raise WinIOError(err)

    def close(self):
        '''drop the handle; safe to call more than once'''
        if not self.closed:
            self.handle = None
            self.closed = True

    def flush(self):
        '''force buffered data out to disk'''
        try:
            win32file.FlushFileBuffers(self.handle)
        except pywintypes.error, err:
            raise WinIOError(err)

    def truncate(self, pos=0):
        '''truncate (or extend) the file to pos bytes'''
        try:
            win32file.SetFilePointer(self.handle, int(pos),
                                     win32file.FILE_BEGIN)
            win32file.SetEndOfFile(self.handle)
        except pywintypes.error, err:
            raise WinIOError(err)
General Comments 0
You need to be logged in to leave comments. Login now