##// END OF EJS Templates
replace os.stat with os.lstat in some where.
Vadim Gelfer -
r2448:b77a2ef6 default
parent child Browse files
Show More
@@ -1,487 +1,487 b''
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005 Matt Mackall <mpm@selenic.com>
4 Copyright 2005 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 import struct, os
10 import struct, os
11 from node import *
11 from node import *
12 from i18n import gettext as _
12 from i18n import gettext as _
13 from demandload import *
13 from demandload import *
14 demandload(globals(), "time bisect stat util re errno")
14 demandload(globals(), "time bisect stat util re errno")
15
15
16 class dirstate(object):
16 class dirstate(object):
17 format = ">cllll"
17 format = ">cllll"
18
18
19 def __init__(self, opener, ui, root):
19 def __init__(self, opener, ui, root):
20 self.opener = opener
20 self.opener = opener
21 self.root = root
21 self.root = root
22 self.dirty = 0
22 self.dirty = 0
23 self.ui = ui
23 self.ui = ui
24 self.map = None
24 self.map = None
25 self.pl = None
25 self.pl = None
26 self.copies = {}
26 self.copies = {}
27 self.ignorefunc = None
27 self.ignorefunc = None
28 self.blockignore = False
28 self.blockignore = False
29
29
30 def wjoin(self, f):
30 def wjoin(self, f):
31 return os.path.join(self.root, f)
31 return os.path.join(self.root, f)
32
32
33 def getcwd(self):
33 def getcwd(self):
34 cwd = os.getcwd()
34 cwd = os.getcwd()
35 if cwd == self.root: return ''
35 if cwd == self.root: return ''
36 return cwd[len(self.root) + 1:]
36 return cwd[len(self.root) + 1:]
37
37
38 def hgignore(self):
38 def hgignore(self):
39 '''return the contents of .hgignore files as a list of patterns.
39 '''return the contents of .hgignore files as a list of patterns.
40
40
41 the files parsed for patterns include:
41 the files parsed for patterns include:
42 .hgignore in the repository root
42 .hgignore in the repository root
43 any additional files specified in the [ui] section of ~/.hgrc
43 any additional files specified in the [ui] section of ~/.hgrc
44
44
45 trailing white space is dropped.
45 trailing white space is dropped.
46 the escape character is backslash.
46 the escape character is backslash.
47 comments start with #.
47 comments start with #.
48 empty lines are skipped.
48 empty lines are skipped.
49
49
50 lines can be of the following formats:
50 lines can be of the following formats:
51
51
52 syntax: regexp # defaults following lines to non-rooted regexps
52 syntax: regexp # defaults following lines to non-rooted regexps
53 syntax: glob # defaults following lines to non-rooted globs
53 syntax: glob # defaults following lines to non-rooted globs
54 re:pattern # non-rooted regular expression
54 re:pattern # non-rooted regular expression
55 glob:pattern # non-rooted glob
55 glob:pattern # non-rooted glob
56 pattern # pattern of the current default type'''
56 pattern # pattern of the current default type'''
57 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
57 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
58 def parselines(fp):
58 def parselines(fp):
59 for line in fp:
59 for line in fp:
60 escape = False
60 escape = False
61 for i in xrange(len(line)):
61 for i in xrange(len(line)):
62 if escape: escape = False
62 if escape: escape = False
63 elif line[i] == '\\': escape = True
63 elif line[i] == '\\': escape = True
64 elif line[i] == '#': break
64 elif line[i] == '#': break
65 line = line[:i].rstrip()
65 line = line[:i].rstrip()
66 if line: yield line
66 if line: yield line
67 repoignore = self.wjoin('.hgignore')
67 repoignore = self.wjoin('.hgignore')
68 files = [repoignore]
68 files = [repoignore]
69 files.extend(self.ui.hgignorefiles())
69 files.extend(self.ui.hgignorefiles())
70 pats = {}
70 pats = {}
71 for f in files:
71 for f in files:
72 try:
72 try:
73 pats[f] = []
73 pats[f] = []
74 fp = open(f)
74 fp = open(f)
75 syntax = 'relre:'
75 syntax = 'relre:'
76 for line in parselines(fp):
76 for line in parselines(fp):
77 if line.startswith('syntax:'):
77 if line.startswith('syntax:'):
78 s = line[7:].strip()
78 s = line[7:].strip()
79 try:
79 try:
80 syntax = syntaxes[s]
80 syntax = syntaxes[s]
81 except KeyError:
81 except KeyError:
82 self.ui.warn(_("%s: ignoring invalid "
82 self.ui.warn(_("%s: ignoring invalid "
83 "syntax '%s'\n") % (f, s))
83 "syntax '%s'\n") % (f, s))
84 continue
84 continue
85 pat = syntax + line
85 pat = syntax + line
86 for s in syntaxes.values():
86 for s in syntaxes.values():
87 if line.startswith(s):
87 if line.startswith(s):
88 pat = line
88 pat = line
89 break
89 break
90 pats[f].append(pat)
90 pats[f].append(pat)
91 except IOError, inst:
91 except IOError, inst:
92 if f != repoignore:
92 if f != repoignore:
93 self.ui.warn(_("skipping unreadable ignore file"
93 self.ui.warn(_("skipping unreadable ignore file"
94 " '%s': %s\n") % (f, inst.strerror))
94 " '%s': %s\n") % (f, inst.strerror))
95 return pats
95 return pats
96
96
97 def ignore(self, fn):
97 def ignore(self, fn):
98 '''default match function used by dirstate and
98 '''default match function used by dirstate and
99 localrepository. this honours the repository .hgignore file
99 localrepository. this honours the repository .hgignore file
100 and any other files specified in the [ui] section of .hgrc.'''
100 and any other files specified in the [ui] section of .hgrc.'''
101 if self.blockignore:
101 if self.blockignore:
102 return False
102 return False
103 if not self.ignorefunc:
103 if not self.ignorefunc:
104 ignore = self.hgignore()
104 ignore = self.hgignore()
105 allpats = []
105 allpats = []
106 [allpats.extend(patlist) for patlist in ignore.values()]
106 [allpats.extend(patlist) for patlist in ignore.values()]
107 if allpats:
107 if allpats:
108 try:
108 try:
109 files, self.ignorefunc, anypats = (
109 files, self.ignorefunc, anypats = (
110 util.matcher(self.root, inc=allpats, src='.hgignore'))
110 util.matcher(self.root, inc=allpats, src='.hgignore'))
111 except util.Abort:
111 except util.Abort:
112 # Re-raise an exception where the src is the right file
112 # Re-raise an exception where the src is the right file
113 for f, patlist in ignore.items():
113 for f, patlist in ignore.items():
114 files, self.ignorefunc, anypats = (
114 files, self.ignorefunc, anypats = (
115 util.matcher(self.root, inc=patlist, src=f))
115 util.matcher(self.root, inc=patlist, src=f))
116 else:
116 else:
117 self.ignorefunc = util.never
117 self.ignorefunc = util.never
118 return self.ignorefunc(fn)
118 return self.ignorefunc(fn)
119
119
120 def __del__(self):
120 def __del__(self):
121 if self.dirty:
121 if self.dirty:
122 self.write()
122 self.write()
123
123
124 def __getitem__(self, key):
124 def __getitem__(self, key):
125 try:
125 try:
126 return self.map[key]
126 return self.map[key]
127 except TypeError:
127 except TypeError:
128 self.lazyread()
128 self.lazyread()
129 return self[key]
129 return self[key]
130
130
131 def __contains__(self, key):
131 def __contains__(self, key):
132 self.lazyread()
132 self.lazyread()
133 return key in self.map
133 return key in self.map
134
134
135 def parents(self):
135 def parents(self):
136 self.lazyread()
136 self.lazyread()
137 return self.pl
137 return self.pl
138
138
139 def markdirty(self):
139 def markdirty(self):
140 if not self.dirty:
140 if not self.dirty:
141 self.dirty = 1
141 self.dirty = 1
142
142
143 def setparents(self, p1, p2=nullid):
143 def setparents(self, p1, p2=nullid):
144 self.lazyread()
144 self.lazyread()
145 self.markdirty()
145 self.markdirty()
146 self.pl = p1, p2
146 self.pl = p1, p2
147
147
148 def state(self, key):
148 def state(self, key):
149 try:
149 try:
150 return self[key][0]
150 return self[key][0]
151 except KeyError:
151 except KeyError:
152 return "?"
152 return "?"
153
153
154 def lazyread(self):
154 def lazyread(self):
155 if self.map is None:
155 if self.map is None:
156 self.read()
156 self.read()
157
157
158 def parse(self, st):
158 def parse(self, st):
159 self.pl = [st[:20], st[20: 40]]
159 self.pl = [st[:20], st[20: 40]]
160
160
161 # deref fields so they will be local in loop
161 # deref fields so they will be local in loop
162 map = self.map
162 map = self.map
163 copies = self.copies
163 copies = self.copies
164 format = self.format
164 format = self.format
165 unpack = struct.unpack
165 unpack = struct.unpack
166
166
167 pos = 40
167 pos = 40
168 e_size = struct.calcsize(format)
168 e_size = struct.calcsize(format)
169
169
170 while pos < len(st):
170 while pos < len(st):
171 newpos = pos + e_size
171 newpos = pos + e_size
172 e = unpack(format, st[pos:newpos])
172 e = unpack(format, st[pos:newpos])
173 l = e[4]
173 l = e[4]
174 pos = newpos
174 pos = newpos
175 newpos = pos + l
175 newpos = pos + l
176 f = st[pos:newpos]
176 f = st[pos:newpos]
177 if '\0' in f:
177 if '\0' in f:
178 f, c = f.split('\0')
178 f, c = f.split('\0')
179 copies[f] = c
179 copies[f] = c
180 map[f] = e[:4]
180 map[f] = e[:4]
181 pos = newpos
181 pos = newpos
182
182
183 def read(self):
183 def read(self):
184 self.map = {}
184 self.map = {}
185 self.pl = [nullid, nullid]
185 self.pl = [nullid, nullid]
186 try:
186 try:
187 st = self.opener("dirstate").read()
187 st = self.opener("dirstate").read()
188 if st:
188 if st:
189 self.parse(st)
189 self.parse(st)
190 except IOError, err:
190 except IOError, err:
191 if err.errno != errno.ENOENT: raise
191 if err.errno != errno.ENOENT: raise
192
192
193 def copy(self, source, dest):
193 def copy(self, source, dest):
194 self.lazyread()
194 self.lazyread()
195 self.markdirty()
195 self.markdirty()
196 self.copies[dest] = source
196 self.copies[dest] = source
197
197
198 def copied(self, file):
198 def copied(self, file):
199 return self.copies.get(file, None)
199 return self.copies.get(file, None)
200
200
201 def update(self, files, state, **kw):
201 def update(self, files, state, **kw):
202 ''' current states:
202 ''' current states:
203 n normal
203 n normal
204 m needs merging
204 m needs merging
205 r marked for removal
205 r marked for removal
206 a marked for addition'''
206 a marked for addition'''
207
207
208 if not files: return
208 if not files: return
209 self.lazyread()
209 self.lazyread()
210 self.markdirty()
210 self.markdirty()
211 for f in files:
211 for f in files:
212 if state == "r":
212 if state == "r":
213 self.map[f] = ('r', 0, 0, 0)
213 self.map[f] = ('r', 0, 0, 0)
214 else:
214 else:
215 s = os.lstat(self.wjoin(f))
215 s = os.lstat(self.wjoin(f))
216 st_size = kw.get('st_size', s.st_size)
216 st_size = kw.get('st_size', s.st_size)
217 st_mtime = kw.get('st_mtime', s.st_mtime)
217 st_mtime = kw.get('st_mtime', s.st_mtime)
218 self.map[f] = (state, s.st_mode, st_size, st_mtime)
218 self.map[f] = (state, s.st_mode, st_size, st_mtime)
219 if self.copies.has_key(f):
219 if self.copies.has_key(f):
220 del self.copies[f]
220 del self.copies[f]
221
221
222 def forget(self, files):
222 def forget(self, files):
223 if not files: return
223 if not files: return
224 self.lazyread()
224 self.lazyread()
225 self.markdirty()
225 self.markdirty()
226 for f in files:
226 for f in files:
227 try:
227 try:
228 del self.map[f]
228 del self.map[f]
229 except KeyError:
229 except KeyError:
230 self.ui.warn(_("not in dirstate: %s!\n") % f)
230 self.ui.warn(_("not in dirstate: %s!\n") % f)
231 pass
231 pass
232
232
233 def clear(self):
233 def clear(self):
234 self.map = {}
234 self.map = {}
235 self.copies = {}
235 self.copies = {}
236 self.markdirty()
236 self.markdirty()
237
237
238 def rebuild(self, parent, files):
238 def rebuild(self, parent, files):
239 self.clear()
239 self.clear()
240 umask = os.umask(0)
240 umask = os.umask(0)
241 os.umask(umask)
241 os.umask(umask)
242 for f, mode in files:
242 for f, mode in files:
243 if mode:
243 if mode:
244 self.map[f] = ('n', ~umask, -1, 0)
244 self.map[f] = ('n', ~umask, -1, 0)
245 else:
245 else:
246 self.map[f] = ('n', ~umask & 0666, -1, 0)
246 self.map[f] = ('n', ~umask & 0666, -1, 0)
247 self.pl = (parent, nullid)
247 self.pl = (parent, nullid)
248 self.markdirty()
248 self.markdirty()
249
249
250 def write(self):
250 def write(self):
251 if not self.dirty:
251 if not self.dirty:
252 return
252 return
253 st = self.opener("dirstate", "w", atomic=True)
253 st = self.opener("dirstate", "w", atomic=True)
254 st.write("".join(self.pl))
254 st.write("".join(self.pl))
255 for f, e in self.map.items():
255 for f, e in self.map.items():
256 c = self.copied(f)
256 c = self.copied(f)
257 if c:
257 if c:
258 f = f + "\0" + c
258 f = f + "\0" + c
259 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
259 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
260 st.write(e + f)
260 st.write(e + f)
261 self.dirty = 0
261 self.dirty = 0
262
262
263 def filterfiles(self, files):
263 def filterfiles(self, files):
264 ret = {}
264 ret = {}
265 unknown = []
265 unknown = []
266
266
267 for x in files:
267 for x in files:
268 if x == '.':
268 if x == '.':
269 return self.map.copy()
269 return self.map.copy()
270 if x not in self.map:
270 if x not in self.map:
271 unknown.append(x)
271 unknown.append(x)
272 else:
272 else:
273 ret[x] = self.map[x]
273 ret[x] = self.map[x]
274
274
275 if not unknown:
275 if not unknown:
276 return ret
276 return ret
277
277
278 b = self.map.keys()
278 b = self.map.keys()
279 b.sort()
279 b.sort()
280 blen = len(b)
280 blen = len(b)
281
281
282 for x in unknown:
282 for x in unknown:
283 bs = bisect.bisect(b, x)
283 bs = bisect.bisect(b, x)
284 if bs != 0 and b[bs-1] == x:
284 if bs != 0 and b[bs-1] == x:
285 ret[x] = self.map[x]
285 ret[x] = self.map[x]
286 continue
286 continue
287 while bs < blen:
287 while bs < blen:
288 s = b[bs]
288 s = b[bs]
289 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
289 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
290 ret[s] = self.map[s]
290 ret[s] = self.map[s]
291 else:
291 else:
292 break
292 break
293 bs += 1
293 bs += 1
294 return ret
294 return ret
295
295
296 def supported_type(self, f, st, verbose=False):
296 def supported_type(self, f, st, verbose=False):
297 if stat.S_ISREG(st.st_mode):
297 if stat.S_ISREG(st.st_mode):
298 return True
298 return True
299 if verbose:
299 if verbose:
300 kind = 'unknown'
300 kind = 'unknown'
301 if stat.S_ISCHR(st.st_mode): kind = _('character device')
301 if stat.S_ISCHR(st.st_mode): kind = _('character device')
302 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
302 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
303 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
303 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
304 elif stat.S_ISLNK(st.st_mode): kind = _('symbolic link')
304 elif stat.S_ISLNK(st.st_mode): kind = _('symbolic link')
305 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
305 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
306 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
306 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
307 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
307 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
308 util.pathto(self.getcwd(), f),
308 util.pathto(self.getcwd(), f),
309 kind))
309 kind))
310 return False
310 return False
311
311
312 def statwalk(self, files=None, match=util.always, dc=None, ignored=False,
312 def statwalk(self, files=None, match=util.always, dc=None, ignored=False,
313 badmatch=None):
313 badmatch=None):
314 self.lazyread()
314 self.lazyread()
315
315
316 # walk all files by default
316 # walk all files by default
317 if not files:
317 if not files:
318 files = [self.root]
318 files = [self.root]
319 if not dc:
319 if not dc:
320 dc = self.map.copy()
320 dc = self.map.copy()
321 elif not dc:
321 elif not dc:
322 dc = self.filterfiles(files)
322 dc = self.filterfiles(files)
323
323
324 def statmatch(file_, stat):
324 def statmatch(file_, stat):
325 file_ = util.pconvert(file_)
325 file_ = util.pconvert(file_)
326 if not ignored and file_ not in dc and self.ignore(file_):
326 if not ignored and file_ not in dc and self.ignore(file_):
327 return False
327 return False
328 return match(file_)
328 return match(file_)
329
329
330 return self.walkhelper(files=files, statmatch=statmatch, dc=dc,
330 return self.walkhelper(files=files, statmatch=statmatch, dc=dc,
331 badmatch=badmatch)
331 badmatch=badmatch)
332
332
333 def walk(self, files=None, match=util.always, dc=None, badmatch=None):
333 def walk(self, files=None, match=util.always, dc=None, badmatch=None):
334 # filter out the stat
334 # filter out the stat
335 for src, f, st in self.statwalk(files, match, dc, badmatch=badmatch):
335 for src, f, st in self.statwalk(files, match, dc, badmatch=badmatch):
336 yield src, f
336 yield src, f
337
337
338 # walk recursively through the directory tree, finding all files
338 # walk recursively through the directory tree, finding all files
339 # matched by the statmatch function
339 # matched by the statmatch function
340 #
340 #
341 # results are yielded in a tuple (src, filename, st), where src
341 # results are yielded in a tuple (src, filename, st), where src
342 # is one of:
342 # is one of:
343 # 'f' the file was found in the directory tree
343 # 'f' the file was found in the directory tree
344 # 'm' the file was only in the dirstate and not in the tree
344 # 'm' the file was only in the dirstate and not in the tree
345 # and st is the stat result if the file was found in the directory.
345 # and st is the stat result if the file was found in the directory.
346 #
346 #
347 # dc is an optional arg for the current dirstate. dc is not modified
347 # dc is an optional arg for the current dirstate. dc is not modified
348 # directly by this function, but might be modified by your statmatch call.
348 # directly by this function, but might be modified by your statmatch call.
349 #
349 #
350 def walkhelper(self, files, statmatch, dc, badmatch=None):
350 def walkhelper(self, files, statmatch, dc, badmatch=None):
351 # recursion free walker, faster than os.walk.
351 # recursion free walker, faster than os.walk.
352 def findfiles(s):
352 def findfiles(s):
353 work = [s]
353 work = [s]
354 while work:
354 while work:
355 top = work.pop()
355 top = work.pop()
356 names = os.listdir(top)
356 names = os.listdir(top)
357 names.sort()
357 names.sort()
358 # nd is the top of the repository dir tree
358 # nd is the top of the repository dir tree
359 nd = util.normpath(top[len(self.root) + 1:])
359 nd = util.normpath(top[len(self.root) + 1:])
360 if nd == '.':
360 if nd == '.':
361 nd = ''
361 nd = ''
362 else:
362 else:
363 # do not recurse into a repo contained in this
363 # do not recurse into a repo contained in this
364 # one. use bisect to find .hg directory so speed
364 # one. use bisect to find .hg directory so speed
365 # is good on big directory.
365 # is good on big directory.
366 hg = bisect.bisect_left(names, '.hg')
366 hg = bisect.bisect_left(names, '.hg')
367 if hg < len(names) and names[hg] == '.hg':
367 if hg < len(names) and names[hg] == '.hg':
368 if os.path.isdir(os.path.join(top, '.hg')):
368 if os.path.isdir(os.path.join(top, '.hg')):
369 continue
369 continue
370 for f in names:
370 for f in names:
371 np = util.pconvert(os.path.join(nd, f))
371 np = util.pconvert(os.path.join(nd, f))
372 if seen(np):
372 if seen(np):
373 continue
373 continue
374 p = os.path.join(top, f)
374 p = os.path.join(top, f)
375 # don't trip over symlinks
375 # don't trip over symlinks
376 st = os.lstat(p)
376 st = os.lstat(p)
377 if stat.S_ISDIR(st.st_mode):
377 if stat.S_ISDIR(st.st_mode):
378 ds = os.path.join(nd, f +'/')
378 ds = os.path.join(nd, f +'/')
379 if statmatch(ds, st):
379 if statmatch(ds, st):
380 work.append(p)
380 work.append(p)
381 if statmatch(np, st) and np in dc:
381 if statmatch(np, st) and np in dc:
382 yield 'm', np, st
382 yield 'm', np, st
383 elif statmatch(np, st):
383 elif statmatch(np, st):
384 if self.supported_type(np, st):
384 if self.supported_type(np, st):
385 yield 'f', np, st
385 yield 'f', np, st
386 elif np in dc:
386 elif np in dc:
387 yield 'm', np, st
387 yield 'm', np, st
388
388
389 known = {'.hg': 1}
389 known = {'.hg': 1}
390 def seen(fn):
390 def seen(fn):
391 if fn in known: return True
391 if fn in known: return True
392 known[fn] = 1
392 known[fn] = 1
393
393
394 # step one, find all files that match our criteria
394 # step one, find all files that match our criteria
395 files.sort()
395 files.sort()
396 for ff in util.unique(files):
396 for ff in util.unique(files):
397 f = self.wjoin(ff)
397 f = self.wjoin(ff)
398 try:
398 try:
399 st = os.lstat(f)
399 st = os.lstat(f)
400 except OSError, inst:
400 except OSError, inst:
401 nf = util.normpath(ff)
401 nf = util.normpath(ff)
402 found = False
402 found = False
403 for fn in dc:
403 for fn in dc:
404 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
404 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
405 found = True
405 found = True
406 break
406 break
407 if not found:
407 if not found:
408 if inst.errno != errno.ENOENT or not badmatch:
408 if inst.errno != errno.ENOENT or not badmatch:
409 self.ui.warn('%s: %s\n' % (
409 self.ui.warn('%s: %s\n' % (
410 util.pathto(self.getcwd(), ff),
410 util.pathto(self.getcwd(), ff),
411 inst.strerror))
411 inst.strerror))
412 elif badmatch and badmatch(ff) and statmatch(ff, None):
412 elif badmatch and badmatch(ff) and statmatch(ff, None):
413 yield 'b', ff, None
413 yield 'b', ff, None
414 continue
414 continue
415 if stat.S_ISDIR(st.st_mode):
415 if stat.S_ISDIR(st.st_mode):
416 cmp1 = (lambda x, y: cmp(x[1], y[1]))
416 cmp1 = (lambda x, y: cmp(x[1], y[1]))
417 sorted_ = [ x for x in findfiles(f) ]
417 sorted_ = [ x for x in findfiles(f) ]
418 sorted_.sort(cmp1)
418 sorted_.sort(cmp1)
419 for e in sorted_:
419 for e in sorted_:
420 yield e
420 yield e
421 else:
421 else:
422 ff = util.normpath(ff)
422 ff = util.normpath(ff)
423 if seen(ff):
423 if seen(ff):
424 continue
424 continue
425 self.blockignore = True
425 self.blockignore = True
426 if statmatch(ff, st):
426 if statmatch(ff, st):
427 if self.supported_type(ff, st, verbose=True):
427 if self.supported_type(ff, st, verbose=True):
428 yield 'f', ff, st
428 yield 'f', ff, st
429 elif ff in dc:
429 elif ff in dc:
430 yield 'm', ff, st
430 yield 'm', ff, st
431 self.blockignore = False
431 self.blockignore = False
432
432
433 # step two run through anything left in the dc hash and yield
433 # step two run through anything left in the dc hash and yield
434 # if we haven't already seen it
434 # if we haven't already seen it
435 ks = dc.keys()
435 ks = dc.keys()
436 ks.sort()
436 ks.sort()
437 for k in ks:
437 for k in ks:
438 if not seen(k) and (statmatch(k, None)):
438 if not seen(k) and (statmatch(k, None)):
439 yield 'm', k, None
439 yield 'm', k, None
440
440
441 def changes(self, files=None, match=util.always, show_ignored=None):
441 def changes(self, files=None, match=util.always, show_ignored=None):
442 lookup, modified, added, unknown, ignored = [], [], [], [], []
442 lookup, modified, added, unknown, ignored = [], [], [], [], []
443 removed, deleted = [], []
443 removed, deleted = [], []
444
444
445 for src, fn, st in self.statwalk(files, match, ignored=show_ignored):
445 for src, fn, st in self.statwalk(files, match, ignored=show_ignored):
446 try:
446 try:
447 type_, mode, size, time = self[fn]
447 type_, mode, size, time = self[fn]
448 except KeyError:
448 except KeyError:
449 if show_ignored and self.ignore(fn):
449 if show_ignored and self.ignore(fn):
450 ignored.append(fn)
450 ignored.append(fn)
451 else:
451 else:
452 unknown.append(fn)
452 unknown.append(fn)
453 continue
453 continue
454 if src == 'm':
454 if src == 'm':
455 nonexistent = True
455 nonexistent = True
456 if not st:
456 if not st:
457 try:
457 try:
458 st = os.lstat(self.wjoin(fn))
458 st = os.lstat(self.wjoin(fn))
459 except OSError, inst:
459 except OSError, inst:
460 if inst.errno != errno.ENOENT:
460 if inst.errno != errno.ENOENT:
461 raise
461 raise
462 st = None
462 st = None
463 # We need to re-check that it is a valid file
463 # We need to re-check that it is a valid file
464 if st and self.supported_type(fn, st):
464 if st and self.supported_type(fn, st):
465 nonexistent = False
465 nonexistent = False
466 # XXX: what to do with file no longer present in the fs
466 # XXX: what to do with file no longer present in the fs
467 # who are not removed in the dirstate ?
467 # who are not removed in the dirstate ?
468 if nonexistent and type_ in "nm":
468 if nonexistent and type_ in "nm":
469 deleted.append(fn)
469 deleted.append(fn)
470 continue
470 continue
471 # check the common case first
471 # check the common case first
472 if type_ == 'n':
472 if type_ == 'n':
473 if not st:
473 if not st:
474 st = os.stat(self.wjoin(fn))
474 st = os.lstat(self.wjoin(fn))
475 if size >= 0 and (size != st.st_size
475 if size >= 0 and (size != st.st_size
476 or (mode ^ st.st_mode) & 0100):
476 or (mode ^ st.st_mode) & 0100):
477 modified.append(fn)
477 modified.append(fn)
478 elif time != st.st_mtime:
478 elif time != st.st_mtime:
479 lookup.append(fn)
479 lookup.append(fn)
480 elif type_ == 'm':
480 elif type_ == 'm':
481 modified.append(fn)
481 modified.append(fn)
482 elif type_ == 'a':
482 elif type_ == 'a':
483 added.append(fn)
483 added.append(fn)
484 elif type_ == 'r':
484 elif type_ == 'r':
485 removed.append(fn)
485 removed.append(fn)
486
486
487 return (lookup, modified, added, removed, deleted, unknown, ignored)
487 return (lookup, modified, added, removed, deleted, unknown, ignored)
@@ -1,2145 +1,2145 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import os, util
8 import os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "appendfile changegroup")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "revlog")
15 demandload(globals(), "revlog")
16
16
17 class localrepository(object):
17 class localrepository(object):
18 capabilities = ()
18 capabilities = ()
19
19
20 def __del__(self):
20 def __del__(self):
21 self.transhandle = None
21 self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 if not path:
23 if not path:
24 p = os.getcwd()
24 p = os.getcwd()
25 while not os.path.isdir(os.path.join(p, ".hg")):
25 while not os.path.isdir(os.path.join(p, ".hg")):
26 oldp = p
26 oldp = p
27 p = os.path.dirname(p)
27 p = os.path.dirname(p)
28 if p == oldp:
28 if p == oldp:
29 raise repo.RepoError(_("no repo found"))
29 raise repo.RepoError(_("no repo found"))
30 path = p
30 path = p
31 self.path = os.path.join(path, ".hg")
31 self.path = os.path.join(path, ".hg")
32
32
33 if not create and not os.path.isdir(self.path):
33 if not create and not os.path.isdir(self.path):
34 raise repo.RepoError(_("repository %s not found") % path)
34 raise repo.RepoError(_("repository %s not found") % path)
35
35
36 self.root = os.path.abspath(path)
36 self.root = os.path.abspath(path)
37 self.origroot = path
37 self.origroot = path
38 self.ui = ui.ui(parentui=parentui)
38 self.ui = ui.ui(parentui=parentui)
39 self.opener = util.opener(self.path)
39 self.opener = util.opener(self.path)
40 self.wopener = util.opener(self.root)
40 self.wopener = util.opener(self.root)
41
41
42 try:
42 try:
43 self.ui.readconfig(self.join("hgrc"), self.root)
43 self.ui.readconfig(self.join("hgrc"), self.root)
44 except IOError:
44 except IOError:
45 pass
45 pass
46
46
47 v = self.ui.revlogopts
47 v = self.ui.revlogopts
48 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
48 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
49 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
49 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
50 fl = v.get('flags', None)
50 fl = v.get('flags', None)
51 flags = 0
51 flags = 0
52 if fl != None:
52 if fl != None:
53 for x in fl.split():
53 for x in fl.split():
54 flags |= revlog.flagstr(x)
54 flags |= revlog.flagstr(x)
55 elif self.revlogv1:
55 elif self.revlogv1:
56 flags = revlog.REVLOG_DEFAULT_FLAGS
56 flags = revlog.REVLOG_DEFAULT_FLAGS
57
57
58 v = self.revlogversion | flags
58 v = self.revlogversion | flags
59 self.manifest = manifest.manifest(self.opener, v)
59 self.manifest = manifest.manifest(self.opener, v)
60 self.changelog = changelog.changelog(self.opener, v)
60 self.changelog = changelog.changelog(self.opener, v)
61
61
62 # the changelog might not have the inline index flag
62 # the changelog might not have the inline index flag
63 # on. If the format of the changelog is the same as found in
63 # on. If the format of the changelog is the same as found in
64 # .hgrc, apply any flags found in the .hgrc as well.
64 # .hgrc, apply any flags found in the .hgrc as well.
65 # Otherwise, just version from the changelog
65 # Otherwise, just version from the changelog
66 v = self.changelog.version
66 v = self.changelog.version
67 if v == self.revlogversion:
67 if v == self.revlogversion:
68 v |= flags
68 v |= flags
69 self.revlogversion = v
69 self.revlogversion = v
70
70
71 self.tagscache = None
71 self.tagscache = None
72 self.nodetagscache = None
72 self.nodetagscache = None
73 self.encodepats = None
73 self.encodepats = None
74 self.decodepats = None
74 self.decodepats = None
75 self.transhandle = None
75 self.transhandle = None
76
76
77 if create:
77 if create:
78 os.mkdir(self.path)
78 os.mkdir(self.path)
79 os.mkdir(self.join("data"))
79 os.mkdir(self.join("data"))
80
80
81 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
81 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
82
82
83 def hook(self, name, throw=False, **args):
83 def hook(self, name, throw=False, **args):
84 def callhook(hname, funcname):
84 def callhook(hname, funcname):
85 '''call python hook. hook is callable object, looked up as
85 '''call python hook. hook is callable object, looked up as
86 name in python module. if callable returns "true", hook
86 name in python module. if callable returns "true", hook
87 fails, else passes. if hook raises exception, treated as
87 fails, else passes. if hook raises exception, treated as
88 hook failure. exception propagates if throw is "true".
88 hook failure. exception propagates if throw is "true".
89
89
90 reason for "true" meaning "hook failed" is so that
90 reason for "true" meaning "hook failed" is so that
91 unmodified commands (e.g. mercurial.commands.update) can
91 unmodified commands (e.g. mercurial.commands.update) can
92 be run as hooks without wrappers to convert return values.'''
92 be run as hooks without wrappers to convert return values.'''
93
93
94 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
94 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
95 d = funcname.rfind('.')
95 d = funcname.rfind('.')
96 if d == -1:
96 if d == -1:
97 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
97 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
98 % (hname, funcname))
98 % (hname, funcname))
99 modname = funcname[:d]
99 modname = funcname[:d]
100 try:
100 try:
101 obj = __import__(modname)
101 obj = __import__(modname)
102 except ImportError:
102 except ImportError:
103 raise util.Abort(_('%s hook is invalid '
103 raise util.Abort(_('%s hook is invalid '
104 '(import of "%s" failed)') %
104 '(import of "%s" failed)') %
105 (hname, modname))
105 (hname, modname))
106 try:
106 try:
107 for p in funcname.split('.')[1:]:
107 for p in funcname.split('.')[1:]:
108 obj = getattr(obj, p)
108 obj = getattr(obj, p)
109 except AttributeError, err:
109 except AttributeError, err:
110 raise util.Abort(_('%s hook is invalid '
110 raise util.Abort(_('%s hook is invalid '
111 '("%s" is not defined)') %
111 '("%s" is not defined)') %
112 (hname, funcname))
112 (hname, funcname))
113 if not callable(obj):
113 if not callable(obj):
114 raise util.Abort(_('%s hook is invalid '
114 raise util.Abort(_('%s hook is invalid '
115 '("%s" is not callable)') %
115 '("%s" is not callable)') %
116 (hname, funcname))
116 (hname, funcname))
117 try:
117 try:
118 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
118 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
119 except (KeyboardInterrupt, util.SignalInterrupt):
119 except (KeyboardInterrupt, util.SignalInterrupt):
120 raise
120 raise
121 except Exception, exc:
121 except Exception, exc:
122 if isinstance(exc, util.Abort):
122 if isinstance(exc, util.Abort):
123 self.ui.warn(_('error: %s hook failed: %s\n') %
123 self.ui.warn(_('error: %s hook failed: %s\n') %
124 (hname, exc.args[0] % exc.args[1:]))
124 (hname, exc.args[0] % exc.args[1:]))
125 else:
125 else:
126 self.ui.warn(_('error: %s hook raised an exception: '
126 self.ui.warn(_('error: %s hook raised an exception: '
127 '%s\n') % (hname, exc))
127 '%s\n') % (hname, exc))
128 if throw:
128 if throw:
129 raise
129 raise
130 self.ui.print_exc()
130 self.ui.print_exc()
131 return True
131 return True
132 if r:
132 if r:
133 if throw:
133 if throw:
134 raise util.Abort(_('%s hook failed') % hname)
134 raise util.Abort(_('%s hook failed') % hname)
135 self.ui.warn(_('warning: %s hook failed\n') % hname)
135 self.ui.warn(_('warning: %s hook failed\n') % hname)
136 return r
136 return r
137
137
138 def runhook(name, cmd):
138 def runhook(name, cmd):
139 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
139 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
140 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
140 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
141 r = util.system(cmd, environ=env, cwd=self.root)
141 r = util.system(cmd, environ=env, cwd=self.root)
142 if r:
142 if r:
143 desc, r = util.explain_exit(r)
143 desc, r = util.explain_exit(r)
144 if throw:
144 if throw:
145 raise util.Abort(_('%s hook %s') % (name, desc))
145 raise util.Abort(_('%s hook %s') % (name, desc))
146 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
146 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
147 return r
147 return r
148
148
149 r = False
149 r = False
150 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
150 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
151 if hname.split(".", 1)[0] == name and cmd]
151 if hname.split(".", 1)[0] == name and cmd]
152 hooks.sort()
152 hooks.sort()
153 for hname, cmd in hooks:
153 for hname, cmd in hooks:
154 if cmd.startswith('python:'):
154 if cmd.startswith('python:'):
155 r = callhook(hname, cmd[7:].strip()) or r
155 r = callhook(hname, cmd[7:].strip()) or r
156 else:
156 else:
157 r = runhook(hname, cmd) or r
157 r = runhook(hname, cmd) or r
158 return r
158 return r
159
159
160 def tags(self):
160 def tags(self):
161 '''return a mapping of tag to node'''
161 '''return a mapping of tag to node'''
162 if not self.tagscache:
162 if not self.tagscache:
163 self.tagscache = {}
163 self.tagscache = {}
164
164
165 def parsetag(line, context):
165 def parsetag(line, context):
166 if not line:
166 if not line:
167 return
167 return
168 s = l.split(" ", 1)
168 s = l.split(" ", 1)
169 if len(s) != 2:
169 if len(s) != 2:
170 self.ui.warn(_("%s: cannot parse entry\n") % context)
170 self.ui.warn(_("%s: cannot parse entry\n") % context)
171 return
171 return
172 node, key = s
172 node, key = s
173 key = key.strip()
173 key = key.strip()
174 try:
174 try:
175 bin_n = bin(node)
175 bin_n = bin(node)
176 except TypeError:
176 except TypeError:
177 self.ui.warn(_("%s: node '%s' is not well formed\n") %
177 self.ui.warn(_("%s: node '%s' is not well formed\n") %
178 (context, node))
178 (context, node))
179 return
179 return
180 if bin_n not in self.changelog.nodemap:
180 if bin_n not in self.changelog.nodemap:
181 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
181 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
182 (context, key))
182 (context, key))
183 return
183 return
184 self.tagscache[key] = bin_n
184 self.tagscache[key] = bin_n
185
185
186 # read the tags file from each head, ending with the tip,
186 # read the tags file from each head, ending with the tip,
187 # and add each tag found to the map, with "newer" ones
187 # and add each tag found to the map, with "newer" ones
188 # taking precedence
188 # taking precedence
189 heads = self.heads()
189 heads = self.heads()
190 heads.reverse()
190 heads.reverse()
191 fl = self.file(".hgtags")
191 fl = self.file(".hgtags")
192 for node in heads:
192 for node in heads:
193 change = self.changelog.read(node)
193 change = self.changelog.read(node)
194 rev = self.changelog.rev(node)
194 rev = self.changelog.rev(node)
195 fn, ff = self.manifest.find(change[0], '.hgtags')
195 fn, ff = self.manifest.find(change[0], '.hgtags')
196 if fn is None: continue
196 if fn is None: continue
197 count = 0
197 count = 0
198 for l in fl.read(fn).splitlines():
198 for l in fl.read(fn).splitlines():
199 count += 1
199 count += 1
200 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
200 parsetag(l, _(".hgtags (rev %d:%s), line %d") %
201 (rev, short(node), count))
201 (rev, short(node), count))
202 try:
202 try:
203 f = self.opener("localtags")
203 f = self.opener("localtags")
204 count = 0
204 count = 0
205 for l in f:
205 for l in f:
206 count += 1
206 count += 1
207 parsetag(l, _("localtags, line %d") % count)
207 parsetag(l, _("localtags, line %d") % count)
208 except IOError:
208 except IOError:
209 pass
209 pass
210
210
211 self.tagscache['tip'] = self.changelog.tip()
211 self.tagscache['tip'] = self.changelog.tip()
212
212
213 return self.tagscache
213 return self.tagscache
214
214
215 def tagslist(self):
215 def tagslist(self):
216 '''return a list of tags ordered by revision'''
216 '''return a list of tags ordered by revision'''
217 l = []
217 l = []
218 for t, n in self.tags().items():
218 for t, n in self.tags().items():
219 try:
219 try:
220 r = self.changelog.rev(n)
220 r = self.changelog.rev(n)
221 except:
221 except:
222 r = -2 # sort to the beginning of the list if unknown
222 r = -2 # sort to the beginning of the list if unknown
223 l.append((r, t, n))
223 l.append((r, t, n))
224 l.sort()
224 l.sort()
225 return [(t, n) for r, t, n in l]
225 return [(t, n) for r, t, n in l]
226
226
227 def nodetags(self, node):
227 def nodetags(self, node):
228 '''return the tags associated with a node'''
228 '''return the tags associated with a node'''
229 if not self.nodetagscache:
229 if not self.nodetagscache:
230 self.nodetagscache = {}
230 self.nodetagscache = {}
231 for t, n in self.tags().items():
231 for t, n in self.tags().items():
232 self.nodetagscache.setdefault(n, []).append(t)
232 self.nodetagscache.setdefault(n, []).append(t)
233 return self.nodetagscache.get(node, [])
233 return self.nodetagscache.get(node, [])
234
234
235 def lookup(self, key):
235 def lookup(self, key):
236 try:
236 try:
237 return self.tags()[key]
237 return self.tags()[key]
238 except KeyError:
238 except KeyError:
239 try:
239 try:
240 return self.changelog.lookup(key)
240 return self.changelog.lookup(key)
241 except:
241 except:
242 raise repo.RepoError(_("unknown revision '%s'") % key)
242 raise repo.RepoError(_("unknown revision '%s'") % key)
243
243
244 def dev(self):
244 def dev(self):
245 return os.stat(self.path).st_dev
245 return os.lstat(self.path).st_dev
246
246
247 def local(self):
247 def local(self):
248 return True
248 return True
249
249
250 def join(self, f):
250 def join(self, f):
251 return os.path.join(self.path, f)
251 return os.path.join(self.path, f)
252
252
253 def wjoin(self, f):
253 def wjoin(self, f):
254 return os.path.join(self.root, f)
254 return os.path.join(self.root, f)
255
255
256 def file(self, f):
256 def file(self, f):
257 if f[0] == '/':
257 if f[0] == '/':
258 f = f[1:]
258 f = f[1:]
259 return filelog.filelog(self.opener, f, self.revlogversion)
259 return filelog.filelog(self.opener, f, self.revlogversion)
260
260
261 def getcwd(self):
261 def getcwd(self):
262 return self.dirstate.getcwd()
262 return self.dirstate.getcwd()
263
263
264 def wfile(self, f, mode='r'):
264 def wfile(self, f, mode='r'):
265 return self.wopener(f, mode)
265 return self.wopener(f, mode)
266
266
267 def wread(self, filename):
267 def wread(self, filename):
268 if self.encodepats == None:
268 if self.encodepats == None:
269 l = []
269 l = []
270 for pat, cmd in self.ui.configitems("encode"):
270 for pat, cmd in self.ui.configitems("encode"):
271 mf = util.matcher(self.root, "", [pat], [], [])[1]
271 mf = util.matcher(self.root, "", [pat], [], [])[1]
272 l.append((mf, cmd))
272 l.append((mf, cmd))
273 self.encodepats = l
273 self.encodepats = l
274
274
275 data = self.wopener(filename, 'r').read()
275 data = self.wopener(filename, 'r').read()
276
276
277 for mf, cmd in self.encodepats:
277 for mf, cmd in self.encodepats:
278 if mf(filename):
278 if mf(filename):
279 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
279 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
280 data = util.filter(data, cmd)
280 data = util.filter(data, cmd)
281 break
281 break
282
282
283 return data
283 return data
284
284
285 def wwrite(self, filename, data, fd=None):
285 def wwrite(self, filename, data, fd=None):
286 if self.decodepats == None:
286 if self.decodepats == None:
287 l = []
287 l = []
288 for pat, cmd in self.ui.configitems("decode"):
288 for pat, cmd in self.ui.configitems("decode"):
289 mf = util.matcher(self.root, "", [pat], [], [])[1]
289 mf = util.matcher(self.root, "", [pat], [], [])[1]
290 l.append((mf, cmd))
290 l.append((mf, cmd))
291 self.decodepats = l
291 self.decodepats = l
292
292
293 for mf, cmd in self.decodepats:
293 for mf, cmd in self.decodepats:
294 if mf(filename):
294 if mf(filename):
295 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
295 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
296 data = util.filter(data, cmd)
296 data = util.filter(data, cmd)
297 break
297 break
298
298
299 if fd:
299 if fd:
300 return fd.write(data)
300 return fd.write(data)
301 return self.wopener(filename, 'w').write(data)
301 return self.wopener(filename, 'w').write(data)
302
302
303 def transaction(self):
303 def transaction(self):
304 tr = self.transhandle
304 tr = self.transhandle
305 if tr != None and tr.running():
305 if tr != None and tr.running():
306 return tr.nest()
306 return tr.nest()
307
307
308 # save dirstate for rollback
308 # save dirstate for rollback
309 try:
309 try:
310 ds = self.opener("dirstate").read()
310 ds = self.opener("dirstate").read()
311 except IOError:
311 except IOError:
312 ds = ""
312 ds = ""
313 self.opener("journal.dirstate", "w").write(ds)
313 self.opener("journal.dirstate", "w").write(ds)
314
314
315 tr = transaction.transaction(self.ui.warn, self.opener,
315 tr = transaction.transaction(self.ui.warn, self.opener,
316 self.join("journal"),
316 self.join("journal"),
317 aftertrans(self.path))
317 aftertrans(self.path))
318 self.transhandle = tr
318 self.transhandle = tr
319 return tr
319 return tr
320
320
321 def recover(self):
321 def recover(self):
322 l = self.lock()
322 l = self.lock()
323 if os.path.exists(self.join("journal")):
323 if os.path.exists(self.join("journal")):
324 self.ui.status(_("rolling back interrupted transaction\n"))
324 self.ui.status(_("rolling back interrupted transaction\n"))
325 transaction.rollback(self.opener, self.join("journal"))
325 transaction.rollback(self.opener, self.join("journal"))
326 self.reload()
326 self.reload()
327 return True
327 return True
328 else:
328 else:
329 self.ui.warn(_("no interrupted transaction available\n"))
329 self.ui.warn(_("no interrupted transaction available\n"))
330 return False
330 return False
331
331
332 def rollback(self, wlock=None):
332 def rollback(self, wlock=None):
333 if not wlock:
333 if not wlock:
334 wlock = self.wlock()
334 wlock = self.wlock()
335 l = self.lock()
335 l = self.lock()
336 if os.path.exists(self.join("undo")):
336 if os.path.exists(self.join("undo")):
337 self.ui.status(_("rolling back last transaction\n"))
337 self.ui.status(_("rolling back last transaction\n"))
338 transaction.rollback(self.opener, self.join("undo"))
338 transaction.rollback(self.opener, self.join("undo"))
339 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
339 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
340 self.reload()
340 self.reload()
341 self.wreload()
341 self.wreload()
342 else:
342 else:
343 self.ui.warn(_("no rollback information available\n"))
343 self.ui.warn(_("no rollback information available\n"))
344
344
345 def wreload(self):
345 def wreload(self):
346 self.dirstate.read()
346 self.dirstate.read()
347
347
348 def reload(self):
348 def reload(self):
349 self.changelog.load()
349 self.changelog.load()
350 self.manifest.load()
350 self.manifest.load()
351 self.tagscache = None
351 self.tagscache = None
352 self.nodetagscache = None
352 self.nodetagscache = None
353
353
354 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
354 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
355 desc=None):
355 desc=None):
356 try:
356 try:
357 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
357 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
358 except lock.LockHeld, inst:
358 except lock.LockHeld, inst:
359 if not wait:
359 if not wait:
360 raise
360 raise
361 self.ui.warn(_("waiting for lock on %s held by %s\n") %
361 self.ui.warn(_("waiting for lock on %s held by %s\n") %
362 (desc, inst.args[0]))
362 (desc, inst.args[0]))
363 # default to 600 seconds timeout
363 # default to 600 seconds timeout
364 l = lock.lock(self.join(lockname),
364 l = lock.lock(self.join(lockname),
365 int(self.ui.config("ui", "timeout") or 600),
365 int(self.ui.config("ui", "timeout") or 600),
366 releasefn, desc=desc)
366 releasefn, desc=desc)
367 if acquirefn:
367 if acquirefn:
368 acquirefn()
368 acquirefn()
369 return l
369 return l
370
370
371 def lock(self, wait=1):
371 def lock(self, wait=1):
372 return self.do_lock("lock", wait, acquirefn=self.reload,
372 return self.do_lock("lock", wait, acquirefn=self.reload,
373 desc=_('repository %s') % self.origroot)
373 desc=_('repository %s') % self.origroot)
374
374
375 def wlock(self, wait=1):
375 def wlock(self, wait=1):
376 return self.do_lock("wlock", wait, self.dirstate.write,
376 return self.do_lock("wlock", wait, self.dirstate.write,
377 self.wreload,
377 self.wreload,
378 desc=_('working directory of %s') % self.origroot)
378 desc=_('working directory of %s') % self.origroot)
379
379
380 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
380 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
381 "determine whether a new filenode is needed"
381 "determine whether a new filenode is needed"
382 fp1 = manifest1.get(filename, nullid)
382 fp1 = manifest1.get(filename, nullid)
383 fp2 = manifest2.get(filename, nullid)
383 fp2 = manifest2.get(filename, nullid)
384
384
385 if fp2 != nullid:
385 if fp2 != nullid:
386 # is one parent an ancestor of the other?
386 # is one parent an ancestor of the other?
387 fpa = filelog.ancestor(fp1, fp2)
387 fpa = filelog.ancestor(fp1, fp2)
388 if fpa == fp1:
388 if fpa == fp1:
389 fp1, fp2 = fp2, nullid
389 fp1, fp2 = fp2, nullid
390 elif fpa == fp2:
390 elif fpa == fp2:
391 fp2 = nullid
391 fp2 = nullid
392
392
393 # is the file unmodified from the parent? report existing entry
393 # is the file unmodified from the parent? report existing entry
394 if fp2 == nullid and text == filelog.read(fp1):
394 if fp2 == nullid and text == filelog.read(fp1):
395 return (fp1, None, None)
395 return (fp1, None, None)
396
396
397 return (None, fp1, fp2)
397 return (None, fp1, fp2)
398
398
399 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
399 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
400 orig_parent = self.dirstate.parents()[0] or nullid
400 orig_parent = self.dirstate.parents()[0] or nullid
401 p1 = p1 or self.dirstate.parents()[0] or nullid
401 p1 = p1 or self.dirstate.parents()[0] or nullid
402 p2 = p2 or self.dirstate.parents()[1] or nullid
402 p2 = p2 or self.dirstate.parents()[1] or nullid
403 c1 = self.changelog.read(p1)
403 c1 = self.changelog.read(p1)
404 c2 = self.changelog.read(p2)
404 c2 = self.changelog.read(p2)
405 m1 = self.manifest.read(c1[0])
405 m1 = self.manifest.read(c1[0])
406 mf1 = self.manifest.readflags(c1[0])
406 mf1 = self.manifest.readflags(c1[0])
407 m2 = self.manifest.read(c2[0])
407 m2 = self.manifest.read(c2[0])
408 changed = []
408 changed = []
409
409
410 if orig_parent == p1:
410 if orig_parent == p1:
411 update_dirstate = 1
411 update_dirstate = 1
412 else:
412 else:
413 update_dirstate = 0
413 update_dirstate = 0
414
414
415 if not wlock:
415 if not wlock:
416 wlock = self.wlock()
416 wlock = self.wlock()
417 l = self.lock()
417 l = self.lock()
418 tr = self.transaction()
418 tr = self.transaction()
419 mm = m1.copy()
419 mm = m1.copy()
420 mfm = mf1.copy()
420 mfm = mf1.copy()
421 linkrev = self.changelog.count()
421 linkrev = self.changelog.count()
422 for f in files:
422 for f in files:
423 try:
423 try:
424 t = self.wread(f)
424 t = self.wread(f)
425 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
425 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
426 r = self.file(f)
426 r = self.file(f)
427 mfm[f] = tm
427 mfm[f] = tm
428
428
429 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
429 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
430 if entry:
430 if entry:
431 mm[f] = entry
431 mm[f] = entry
432 continue
432 continue
433
433
434 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
434 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
435 changed.append(f)
435 changed.append(f)
436 if update_dirstate:
436 if update_dirstate:
437 self.dirstate.update([f], "n")
437 self.dirstate.update([f], "n")
438 except IOError:
438 except IOError:
439 try:
439 try:
440 del mm[f]
440 del mm[f]
441 del mfm[f]
441 del mfm[f]
442 if update_dirstate:
442 if update_dirstate:
443 self.dirstate.forget([f])
443 self.dirstate.forget([f])
444 except:
444 except:
445 # deleted from p2?
445 # deleted from p2?
446 pass
446 pass
447
447
448 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
448 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
449 user = user or self.ui.username()
449 user = user or self.ui.username()
450 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
450 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
451 tr.close()
451 tr.close()
452 if update_dirstate:
452 if update_dirstate:
453 self.dirstate.setparents(n, nullid)
453 self.dirstate.setparents(n, nullid)
454
454
455 def commit(self, files=None, text="", user=None, date=None,
455 def commit(self, files=None, text="", user=None, date=None,
456 match=util.always, force=False, lock=None, wlock=None,
456 match=util.always, force=False, lock=None, wlock=None,
457 force_editor=False):
457 force_editor=False):
458 commit = []
458 commit = []
459 remove = []
459 remove = []
460 changed = []
460 changed = []
461
461
462 if files:
462 if files:
463 for f in files:
463 for f in files:
464 s = self.dirstate.state(f)
464 s = self.dirstate.state(f)
465 if s in 'nmai':
465 if s in 'nmai':
466 commit.append(f)
466 commit.append(f)
467 elif s == 'r':
467 elif s == 'r':
468 remove.append(f)
468 remove.append(f)
469 else:
469 else:
470 self.ui.warn(_("%s not tracked!\n") % f)
470 self.ui.warn(_("%s not tracked!\n") % f)
471 else:
471 else:
472 modified, added, removed, deleted, unknown = self.changes(match=match)
472 modified, added, removed, deleted, unknown = self.changes(match=match)
473 commit = modified + added
473 commit = modified + added
474 remove = removed
474 remove = removed
475
475
476 p1, p2 = self.dirstate.parents()
476 p1, p2 = self.dirstate.parents()
477 c1 = self.changelog.read(p1)
477 c1 = self.changelog.read(p1)
478 c2 = self.changelog.read(p2)
478 c2 = self.changelog.read(p2)
479 m1 = self.manifest.read(c1[0])
479 m1 = self.manifest.read(c1[0])
480 mf1 = self.manifest.readflags(c1[0])
480 mf1 = self.manifest.readflags(c1[0])
481 m2 = self.manifest.read(c2[0])
481 m2 = self.manifest.read(c2[0])
482
482
483 if not commit and not remove and not force and p2 == nullid:
483 if not commit and not remove and not force and p2 == nullid:
484 self.ui.status(_("nothing changed\n"))
484 self.ui.status(_("nothing changed\n"))
485 return None
485 return None
486
486
487 xp1 = hex(p1)
487 xp1 = hex(p1)
488 if p2 == nullid: xp2 = ''
488 if p2 == nullid: xp2 = ''
489 else: xp2 = hex(p2)
489 else: xp2 = hex(p2)
490
490
491 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
491 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
492
492
493 if not wlock:
493 if not wlock:
494 wlock = self.wlock()
494 wlock = self.wlock()
495 if not lock:
495 if not lock:
496 lock = self.lock()
496 lock = self.lock()
497 tr = self.transaction()
497 tr = self.transaction()
498
498
499 # check in files
499 # check in files
500 new = {}
500 new = {}
501 linkrev = self.changelog.count()
501 linkrev = self.changelog.count()
502 commit.sort()
502 commit.sort()
503 for f in commit:
503 for f in commit:
504 self.ui.note(f + "\n")
504 self.ui.note(f + "\n")
505 try:
505 try:
506 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
506 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
507 t = self.wread(f)
507 t = self.wread(f)
508 except IOError:
508 except IOError:
509 self.ui.warn(_("trouble committing %s!\n") % f)
509 self.ui.warn(_("trouble committing %s!\n") % f)
510 raise
510 raise
511
511
512 r = self.file(f)
512 r = self.file(f)
513
513
514 meta = {}
514 meta = {}
515 cp = self.dirstate.copied(f)
515 cp = self.dirstate.copied(f)
516 if cp:
516 if cp:
517 meta["copy"] = cp
517 meta["copy"] = cp
518 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
518 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
519 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
519 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
520 fp1, fp2 = nullid, nullid
520 fp1, fp2 = nullid, nullid
521 else:
521 else:
522 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
522 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
523 if entry:
523 if entry:
524 new[f] = entry
524 new[f] = entry
525 continue
525 continue
526
526
527 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
527 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
528 # remember what we've added so that we can later calculate
528 # remember what we've added so that we can later calculate
529 # the files to pull from a set of changesets
529 # the files to pull from a set of changesets
530 changed.append(f)
530 changed.append(f)
531
531
532 # update manifest
532 # update manifest
533 m1 = m1.copy()
533 m1 = m1.copy()
534 m1.update(new)
534 m1.update(new)
535 for f in remove:
535 for f in remove:
536 if f in m1:
536 if f in m1:
537 del m1[f]
537 del m1[f]
538 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
538 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
539 (new, remove))
539 (new, remove))
540
540
541 # add changeset
541 # add changeset
542 new = new.keys()
542 new = new.keys()
543 new.sort()
543 new.sort()
544
544
545 user = user or self.ui.username()
545 user = user or self.ui.username()
546 if not text or force_editor:
546 if not text or force_editor:
547 edittext = []
547 edittext = []
548 if text:
548 if text:
549 edittext.append(text)
549 edittext.append(text)
550 edittext.append("")
550 edittext.append("")
551 if p2 != nullid:
551 if p2 != nullid:
552 edittext.append("HG: branch merge")
552 edittext.append("HG: branch merge")
553 edittext.extend(["HG: changed %s" % f for f in changed])
553 edittext.extend(["HG: changed %s" % f for f in changed])
554 edittext.extend(["HG: removed %s" % f for f in remove])
554 edittext.extend(["HG: removed %s" % f for f in remove])
555 if not changed and not remove:
555 if not changed and not remove:
556 edittext.append("HG: no files changed")
556 edittext.append("HG: no files changed")
557 edittext.append("")
557 edittext.append("")
558 # run editor in the repository root
558 # run editor in the repository root
559 olddir = os.getcwd()
559 olddir = os.getcwd()
560 os.chdir(self.root)
560 os.chdir(self.root)
561 text = self.ui.edit("\n".join(edittext), user)
561 text = self.ui.edit("\n".join(edittext), user)
562 os.chdir(olddir)
562 os.chdir(olddir)
563
563
564 lines = [line.rstrip() for line in text.rstrip().splitlines()]
564 lines = [line.rstrip() for line in text.rstrip().splitlines()]
565 while lines and not lines[0]:
565 while lines and not lines[0]:
566 del lines[0]
566 del lines[0]
567 if not lines:
567 if not lines:
568 return None
568 return None
569 text = '\n'.join(lines)
569 text = '\n'.join(lines)
570 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
570 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
571 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
571 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
572 parent2=xp2)
572 parent2=xp2)
573 tr.close()
573 tr.close()
574
574
575 self.dirstate.setparents(n)
575 self.dirstate.setparents(n)
576 self.dirstate.update(new, "n")
576 self.dirstate.update(new, "n")
577 self.dirstate.forget(remove)
577 self.dirstate.forget(remove)
578
578
579 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
579 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
580 return n
580 return n
581
581
582 def walk(self, node=None, files=[], match=util.always, badmatch=None):
582 def walk(self, node=None, files=[], match=util.always, badmatch=None):
583 if node:
583 if node:
584 fdict = dict.fromkeys(files)
584 fdict = dict.fromkeys(files)
585 for fn in self.manifest.read(self.changelog.read(node)[0]):
585 for fn in self.manifest.read(self.changelog.read(node)[0]):
586 fdict.pop(fn, None)
586 fdict.pop(fn, None)
587 if match(fn):
587 if match(fn):
588 yield 'm', fn
588 yield 'm', fn
589 for fn in fdict:
589 for fn in fdict:
590 if badmatch and badmatch(fn):
590 if badmatch and badmatch(fn):
591 if match(fn):
591 if match(fn):
592 yield 'b', fn
592 yield 'b', fn
593 else:
593 else:
594 self.ui.warn(_('%s: No such file in rev %s\n') % (
594 self.ui.warn(_('%s: No such file in rev %s\n') % (
595 util.pathto(self.getcwd(), fn), short(node)))
595 util.pathto(self.getcwd(), fn), short(node)))
596 else:
596 else:
597 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
597 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
598 yield src, fn
598 yield src, fn
599
599
600 def changes(self, node1=None, node2=None, files=[], match=util.always,
600 def changes(self, node1=None, node2=None, files=[], match=util.always,
601 wlock=None, show_ignored=None):
601 wlock=None, show_ignored=None):
602 """return changes between two nodes or node and working directory
602 """return changes between two nodes or node and working directory
603
603
604 If node1 is None, use the first dirstate parent instead.
604 If node1 is None, use the first dirstate parent instead.
605 If node2 is None, compare node1 with working directory.
605 If node2 is None, compare node1 with working directory.
606 """
606 """
607
607
608 def fcmp(fn, mf):
608 def fcmp(fn, mf):
609 t1 = self.wread(fn)
609 t1 = self.wread(fn)
610 t2 = self.file(fn).read(mf.get(fn, nullid))
610 t2 = self.file(fn).read(mf.get(fn, nullid))
611 return cmp(t1, t2)
611 return cmp(t1, t2)
612
612
613 def mfmatches(node):
613 def mfmatches(node):
614 change = self.changelog.read(node)
614 change = self.changelog.read(node)
615 mf = dict(self.manifest.read(change[0]))
615 mf = dict(self.manifest.read(change[0]))
616 for fn in mf.keys():
616 for fn in mf.keys():
617 if not match(fn):
617 if not match(fn):
618 del mf[fn]
618 del mf[fn]
619 return mf
619 return mf
620
620
621 if node1:
621 if node1:
622 # read the manifest from node1 before the manifest from node2,
622 # read the manifest from node1 before the manifest from node2,
623 # so that we'll hit the manifest cache if we're going through
623 # so that we'll hit the manifest cache if we're going through
624 # all the revisions in parent->child order.
624 # all the revisions in parent->child order.
625 mf1 = mfmatches(node1)
625 mf1 = mfmatches(node1)
626
626
627 # are we comparing the working directory?
627 # are we comparing the working directory?
628 if not node2:
628 if not node2:
629 if not wlock:
629 if not wlock:
630 try:
630 try:
631 wlock = self.wlock(wait=0)
631 wlock = self.wlock(wait=0)
632 except lock.LockException:
632 except lock.LockException:
633 wlock = None
633 wlock = None
634 lookup, modified, added, removed, deleted, unknown, ignored = (
634 lookup, modified, added, removed, deleted, unknown, ignored = (
635 self.dirstate.changes(files, match, show_ignored))
635 self.dirstate.changes(files, match, show_ignored))
636
636
637 # are we comparing working dir against its parent?
637 # are we comparing working dir against its parent?
638 if not node1:
638 if not node1:
639 if lookup:
639 if lookup:
640 # do a full compare of any files that might have changed
640 # do a full compare of any files that might have changed
641 mf2 = mfmatches(self.dirstate.parents()[0])
641 mf2 = mfmatches(self.dirstate.parents()[0])
642 for f in lookup:
642 for f in lookup:
643 if fcmp(f, mf2):
643 if fcmp(f, mf2):
644 modified.append(f)
644 modified.append(f)
645 elif wlock is not None:
645 elif wlock is not None:
646 self.dirstate.update([f], "n")
646 self.dirstate.update([f], "n")
647 else:
647 else:
648 # we are comparing working dir against non-parent
648 # we are comparing working dir against non-parent
649 # generate a pseudo-manifest for the working dir
649 # generate a pseudo-manifest for the working dir
650 mf2 = mfmatches(self.dirstate.parents()[0])
650 mf2 = mfmatches(self.dirstate.parents()[0])
651 for f in lookup + modified + added:
651 for f in lookup + modified + added:
652 mf2[f] = ""
652 mf2[f] = ""
653 for f in removed:
653 for f in removed:
654 if f in mf2:
654 if f in mf2:
655 del mf2[f]
655 del mf2[f]
656 else:
656 else:
657 # we are comparing two revisions
657 # we are comparing two revisions
658 deleted, unknown, ignored = [], [], []
658 deleted, unknown, ignored = [], [], []
659 mf2 = mfmatches(node2)
659 mf2 = mfmatches(node2)
660
660
661 if node1:
661 if node1:
662 # flush lists from dirstate before comparing manifests
662 # flush lists from dirstate before comparing manifests
663 modified, added = [], []
663 modified, added = [], []
664
664
665 for fn in mf2:
665 for fn in mf2:
666 if mf1.has_key(fn):
666 if mf1.has_key(fn):
667 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
667 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
668 modified.append(fn)
668 modified.append(fn)
669 del mf1[fn]
669 del mf1[fn]
670 else:
670 else:
671 added.append(fn)
671 added.append(fn)
672
672
673 removed = mf1.keys()
673 removed = mf1.keys()
674
674
675 # sort and return results:
675 # sort and return results:
676 for l in modified, added, removed, deleted, unknown, ignored:
676 for l in modified, added, removed, deleted, unknown, ignored:
677 l.sort()
677 l.sort()
678 if show_ignored is None:
678 if show_ignored is None:
679 return (modified, added, removed, deleted, unknown)
679 return (modified, added, removed, deleted, unknown)
680 else:
680 else:
681 return (modified, added, removed, deleted, unknown, ignored)
681 return (modified, added, removed, deleted, unknown, ignored)
682
682
683 def add(self, list, wlock=None):
683 def add(self, list, wlock=None):
684 if not wlock:
684 if not wlock:
685 wlock = self.wlock()
685 wlock = self.wlock()
686 for f in list:
686 for f in list:
687 p = self.wjoin(f)
687 p = self.wjoin(f)
688 if not os.path.exists(p):
688 if not os.path.exists(p):
689 self.ui.warn(_("%s does not exist!\n") % f)
689 self.ui.warn(_("%s does not exist!\n") % f)
690 elif not os.path.isfile(p):
690 elif not os.path.isfile(p):
691 self.ui.warn(_("%s not added: only files supported currently\n")
691 self.ui.warn(_("%s not added: only files supported currently\n")
692 % f)
692 % f)
693 elif self.dirstate.state(f) in 'an':
693 elif self.dirstate.state(f) in 'an':
694 self.ui.warn(_("%s already tracked!\n") % f)
694 self.ui.warn(_("%s already tracked!\n") % f)
695 else:
695 else:
696 self.dirstate.update([f], "a")
696 self.dirstate.update([f], "a")
697
697
698 def forget(self, list, wlock=None):
698 def forget(self, list, wlock=None):
699 if not wlock:
699 if not wlock:
700 wlock = self.wlock()
700 wlock = self.wlock()
701 for f in list:
701 for f in list:
702 if self.dirstate.state(f) not in 'ai':
702 if self.dirstate.state(f) not in 'ai':
703 self.ui.warn(_("%s not added!\n") % f)
703 self.ui.warn(_("%s not added!\n") % f)
704 else:
704 else:
705 self.dirstate.forget([f])
705 self.dirstate.forget([f])
706
706
707 def remove(self, list, unlink=False, wlock=None):
707 def remove(self, list, unlink=False, wlock=None):
708 if unlink:
708 if unlink:
709 for f in list:
709 for f in list:
710 try:
710 try:
711 util.unlink(self.wjoin(f))
711 util.unlink(self.wjoin(f))
712 except OSError, inst:
712 except OSError, inst:
713 if inst.errno != errno.ENOENT:
713 if inst.errno != errno.ENOENT:
714 raise
714 raise
715 if not wlock:
715 if not wlock:
716 wlock = self.wlock()
716 wlock = self.wlock()
717 for f in list:
717 for f in list:
718 p = self.wjoin(f)
718 p = self.wjoin(f)
719 if os.path.exists(p):
719 if os.path.exists(p):
720 self.ui.warn(_("%s still exists!\n") % f)
720 self.ui.warn(_("%s still exists!\n") % f)
721 elif self.dirstate.state(f) == 'a':
721 elif self.dirstate.state(f) == 'a':
722 self.dirstate.forget([f])
722 self.dirstate.forget([f])
723 elif f not in self.dirstate:
723 elif f not in self.dirstate:
724 self.ui.warn(_("%s not tracked!\n") % f)
724 self.ui.warn(_("%s not tracked!\n") % f)
725 else:
725 else:
726 self.dirstate.update([f], "r")
726 self.dirstate.update([f], "r")
727
727
728 def undelete(self, list, wlock=None):
728 def undelete(self, list, wlock=None):
729 p = self.dirstate.parents()[0]
729 p = self.dirstate.parents()[0]
730 mn = self.changelog.read(p)[0]
730 mn = self.changelog.read(p)[0]
731 mf = self.manifest.readflags(mn)
731 mf = self.manifest.readflags(mn)
732 m = self.manifest.read(mn)
732 m = self.manifest.read(mn)
733 if not wlock:
733 if not wlock:
734 wlock = self.wlock()
734 wlock = self.wlock()
735 for f in list:
735 for f in list:
736 if self.dirstate.state(f) not in "r":
736 if self.dirstate.state(f) not in "r":
737 self.ui.warn("%s not removed!\n" % f)
737 self.ui.warn("%s not removed!\n" % f)
738 else:
738 else:
739 t = self.file(f).read(m[f])
739 t = self.file(f).read(m[f])
740 self.wwrite(f, t)
740 self.wwrite(f, t)
741 util.set_exec(self.wjoin(f), mf[f])
741 util.set_exec(self.wjoin(f), mf[f])
742 self.dirstate.update([f], "n")
742 self.dirstate.update([f], "n")
743
743
744 def copy(self, source, dest, wlock=None):
744 def copy(self, source, dest, wlock=None):
745 p = self.wjoin(dest)
745 p = self.wjoin(dest)
746 if not os.path.exists(p):
746 if not os.path.exists(p):
747 self.ui.warn(_("%s does not exist!\n") % dest)
747 self.ui.warn(_("%s does not exist!\n") % dest)
748 elif not os.path.isfile(p):
748 elif not os.path.isfile(p):
749 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
749 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
750 else:
750 else:
751 if not wlock:
751 if not wlock:
752 wlock = self.wlock()
752 wlock = self.wlock()
753 if self.dirstate.state(dest) == '?':
753 if self.dirstate.state(dest) == '?':
754 self.dirstate.update([dest], "a")
754 self.dirstate.update([dest], "a")
755 self.dirstate.copy(source, dest)
755 self.dirstate.copy(source, dest)
756
756
757 def heads(self, start=None):
757 def heads(self, start=None):
758 heads = self.changelog.heads(start)
758 heads = self.changelog.heads(start)
759 # sort the output in rev descending order
759 # sort the output in rev descending order
760 heads = [(-self.changelog.rev(h), h) for h in heads]
760 heads = [(-self.changelog.rev(h), h) for h in heads]
761 heads.sort()
761 heads.sort()
762 return [n for (r, n) in heads]
762 return [n for (r, n) in heads]
763
763
764 # branchlookup returns a dict giving a list of branches for
764 # branchlookup returns a dict giving a list of branches for
765 # each head. A branch is defined as the tag of a node or
765 # each head. A branch is defined as the tag of a node or
766 # the branch of the node's parents. If a node has multiple
766 # the branch of the node's parents. If a node has multiple
767 # branch tags, tags are eliminated if they are visible from other
767 # branch tags, tags are eliminated if they are visible from other
768 # branch tags.
768 # branch tags.
769 #
769 #
770 # So, for this graph: a->b->c->d->e
770 # So, for this graph: a->b->c->d->e
771 # \ /
771 # \ /
772 # aa -----/
772 # aa -----/
773 # a has tag 2.6.12
773 # a has tag 2.6.12
774 # d has tag 2.6.13
774 # d has tag 2.6.13
775 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
775 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
776 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
776 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
777 # from the list.
777 # from the list.
778 #
778 #
779 # It is possible that more than one head will have the same branch tag.
779 # It is possible that more than one head will have the same branch tag.
780 # callers need to check the result for multiple heads under the same
780 # callers need to check the result for multiple heads under the same
781 # branch tag if that is a problem for them (ie checkout of a specific
781 # branch tag if that is a problem for them (ie checkout of a specific
782 # branch).
782 # branch).
783 #
783 #
784 # passing in a specific branch will limit the depth of the search
784 # passing in a specific branch will limit the depth of the search
785 # through the parents. It won't limit the branches returned in the
785 # through the parents. It won't limit the branches returned in the
786 # result though.
786 # result though.
787 def branchlookup(self, heads=None, branch=None):
787 def branchlookup(self, heads=None, branch=None):
788 if not heads:
788 if not heads:
789 heads = self.heads()
789 heads = self.heads()
790 headt = [ h for h in heads ]
790 headt = [ h for h in heads ]
791 chlog = self.changelog
791 chlog = self.changelog
792 branches = {}
792 branches = {}
793 merges = []
793 merges = []
794 seenmerge = {}
794 seenmerge = {}
795
795
796 # traverse the tree once for each head, recording in the branches
796 # traverse the tree once for each head, recording in the branches
797 # dict which tags are visible from this head. The branches
797 # dict which tags are visible from this head. The branches
798 # dict also records which tags are visible from each tag
798 # dict also records which tags are visible from each tag
799 # while we traverse.
799 # while we traverse.
800 while headt or merges:
800 while headt or merges:
801 if merges:
801 if merges:
802 n, found = merges.pop()
802 n, found = merges.pop()
803 visit = [n]
803 visit = [n]
804 else:
804 else:
805 h = headt.pop()
805 h = headt.pop()
806 visit = [h]
806 visit = [h]
807 found = [h]
807 found = [h]
808 seen = {}
808 seen = {}
809 while visit:
809 while visit:
810 n = visit.pop()
810 n = visit.pop()
811 if n in seen:
811 if n in seen:
812 continue
812 continue
813 pp = chlog.parents(n)
813 pp = chlog.parents(n)
814 tags = self.nodetags(n)
814 tags = self.nodetags(n)
815 if tags:
815 if tags:
816 for x in tags:
816 for x in tags:
817 if x == 'tip':
817 if x == 'tip':
818 continue
818 continue
819 for f in found:
819 for f in found:
820 branches.setdefault(f, {})[n] = 1
820 branches.setdefault(f, {})[n] = 1
821 branches.setdefault(n, {})[n] = 1
821 branches.setdefault(n, {})[n] = 1
822 break
822 break
823 if n not in found:
823 if n not in found:
824 found.append(n)
824 found.append(n)
825 if branch in tags:
825 if branch in tags:
826 continue
826 continue
827 seen[n] = 1
827 seen[n] = 1
828 if pp[1] != nullid and n not in seenmerge:
828 if pp[1] != nullid and n not in seenmerge:
829 merges.append((pp[1], [x for x in found]))
829 merges.append((pp[1], [x for x in found]))
830 seenmerge[n] = 1
830 seenmerge[n] = 1
831 if pp[0] != nullid:
831 if pp[0] != nullid:
832 visit.append(pp[0])
832 visit.append(pp[0])
833 # traverse the branches dict, eliminating branch tags from each
833 # traverse the branches dict, eliminating branch tags from each
834 # head that are visible from another branch tag for that head.
834 # head that are visible from another branch tag for that head.
835 out = {}
835 out = {}
836 viscache = {}
836 viscache = {}
837 for h in heads:
837 for h in heads:
838 def visible(node):
838 def visible(node):
839 if node in viscache:
839 if node in viscache:
840 return viscache[node]
840 return viscache[node]
841 ret = {}
841 ret = {}
842 visit = [node]
842 visit = [node]
843 while visit:
843 while visit:
844 x = visit.pop()
844 x = visit.pop()
845 if x in viscache:
845 if x in viscache:
846 ret.update(viscache[x])
846 ret.update(viscache[x])
847 elif x not in ret:
847 elif x not in ret:
848 ret[x] = 1
848 ret[x] = 1
849 if x in branches:
849 if x in branches:
850 visit[len(visit):] = branches[x].keys()
850 visit[len(visit):] = branches[x].keys()
851 viscache[node] = ret
851 viscache[node] = ret
852 return ret
852 return ret
853 if h not in branches:
853 if h not in branches:
854 continue
854 continue
855 # O(n^2), but somewhat limited. This only searches the
855 # O(n^2), but somewhat limited. This only searches the
856 # tags visible from a specific head, not all the tags in the
856 # tags visible from a specific head, not all the tags in the
857 # whole repo.
857 # whole repo.
858 for b in branches[h]:
858 for b in branches[h]:
859 vis = False
859 vis = False
860 for bb in branches[h].keys():
860 for bb in branches[h].keys():
861 if b != bb:
861 if b != bb:
862 if b in visible(bb):
862 if b in visible(bb):
863 vis = True
863 vis = True
864 break
864 break
865 if not vis:
865 if not vis:
866 l = out.setdefault(h, [])
866 l = out.setdefault(h, [])
867 l[len(l):] = self.nodetags(b)
867 l[len(l):] = self.nodetags(b)
868 return out
868 return out
869
869
870 def branches(self, nodes):
870 def branches(self, nodes):
871 if not nodes:
871 if not nodes:
872 nodes = [self.changelog.tip()]
872 nodes = [self.changelog.tip()]
873 b = []
873 b = []
874 for n in nodes:
874 for n in nodes:
875 t = n
875 t = n
876 while 1:
876 while 1:
877 p = self.changelog.parents(n)
877 p = self.changelog.parents(n)
878 if p[1] != nullid or p[0] == nullid:
878 if p[1] != nullid or p[0] == nullid:
879 b.append((t, n, p[0], p[1]))
879 b.append((t, n, p[0], p[1]))
880 break
880 break
881 n = p[0]
881 n = p[0]
882 return b
882 return b
883
883
884 def between(self, pairs):
884 def between(self, pairs):
885 r = []
885 r = []
886
886
887 for top, bottom in pairs:
887 for top, bottom in pairs:
888 n, l, i = top, [], 0
888 n, l, i = top, [], 0
889 f = 1
889 f = 1
890
890
891 while n != bottom:
891 while n != bottom:
892 p = self.changelog.parents(n)[0]
892 p = self.changelog.parents(n)[0]
893 if i == f:
893 if i == f:
894 l.append(n)
894 l.append(n)
895 f = f * 2
895 f = f * 2
896 n = p
896 n = p
897 i += 1
897 i += 1
898
898
899 r.append(l)
899 r.append(l)
900
900
901 return r
901 return r
902
902
903 def findincoming(self, remote, base=None, heads=None, force=False):
903 def findincoming(self, remote, base=None, heads=None, force=False):
904 """Return list of roots of the subsets of missing nodes from remote
904 """Return list of roots of the subsets of missing nodes from remote
905
905
906 If base dict is specified, assume that these nodes and their parents
906 If base dict is specified, assume that these nodes and their parents
907 exist on the remote side and that no child of a node of base exists
907 exist on the remote side and that no child of a node of base exists
908 in both remote and self.
908 in both remote and self.
909 Furthermore base will be updated to include the nodes that exists
909 Furthermore base will be updated to include the nodes that exists
910 in self and remote but no children exists in self and remote.
910 in self and remote but no children exists in self and remote.
911 If a list of heads is specified, return only nodes which are heads
911 If a list of heads is specified, return only nodes which are heads
912 or ancestors of these heads.
912 or ancestors of these heads.
913
913
914 All the ancestors of base are in self and in remote.
914 All the ancestors of base are in self and in remote.
915 All the descendants of the list returned are missing in self.
915 All the descendants of the list returned are missing in self.
916 (and so we know that the rest of the nodes are missing in remote, see
916 (and so we know that the rest of the nodes are missing in remote, see
917 outgoing)
917 outgoing)
918 """
918 """
919 m = self.changelog.nodemap
919 m = self.changelog.nodemap
920 search = []
920 search = []
921 fetch = {}
921 fetch = {}
922 seen = {}
922 seen = {}
923 seenbranch = {}
923 seenbranch = {}
924 if base == None:
924 if base == None:
925 base = {}
925 base = {}
926
926
927 if not heads:
927 if not heads:
928 heads = remote.heads()
928 heads = remote.heads()
929
929
930 if self.changelog.tip() == nullid:
930 if self.changelog.tip() == nullid:
931 base[nullid] = 1
931 base[nullid] = 1
932 if heads != [nullid]:
932 if heads != [nullid]:
933 return [nullid]
933 return [nullid]
934 return []
934 return []
935
935
936 # assume we're closer to the tip than the root
936 # assume we're closer to the tip than the root
937 # and start by examining the heads
937 # and start by examining the heads
938 self.ui.status(_("searching for changes\n"))
938 self.ui.status(_("searching for changes\n"))
939
939
940 unknown = []
940 unknown = []
941 for h in heads:
941 for h in heads:
942 if h not in m:
942 if h not in m:
943 unknown.append(h)
943 unknown.append(h)
944 else:
944 else:
945 base[h] = 1
945 base[h] = 1
946
946
947 if not unknown:
947 if not unknown:
948 return []
948 return []
949
949
950 req = dict.fromkeys(unknown)
950 req = dict.fromkeys(unknown)
951 reqcnt = 0
951 reqcnt = 0
952
952
953 # search through remote branches
953 # search through remote branches
954 # a 'branch' here is a linear segment of history, with four parts:
954 # a 'branch' here is a linear segment of history, with four parts:
955 # head, root, first parent, second parent
955 # head, root, first parent, second parent
956 # (a branch always has two parents (or none) by definition)
956 # (a branch always has two parents (or none) by definition)
957 unknown = remote.branches(unknown)
957 unknown = remote.branches(unknown)
958 while unknown:
958 while unknown:
959 r = []
959 r = []
960 while unknown:
960 while unknown:
961 n = unknown.pop(0)
961 n = unknown.pop(0)
962 if n[0] in seen:
962 if n[0] in seen:
963 continue
963 continue
964
964
965 self.ui.debug(_("examining %s:%s\n")
965 self.ui.debug(_("examining %s:%s\n")
966 % (short(n[0]), short(n[1])))
966 % (short(n[0]), short(n[1])))
967 if n[0] == nullid: # found the end of the branch
967 if n[0] == nullid: # found the end of the branch
968 pass
968 pass
969 elif n in seenbranch:
969 elif n in seenbranch:
970 self.ui.debug(_("branch already found\n"))
970 self.ui.debug(_("branch already found\n"))
971 continue
971 continue
972 elif n[1] and n[1] in m: # do we know the base?
972 elif n[1] and n[1] in m: # do we know the base?
973 self.ui.debug(_("found incomplete branch %s:%s\n")
973 self.ui.debug(_("found incomplete branch %s:%s\n")
974 % (short(n[0]), short(n[1])))
974 % (short(n[0]), short(n[1])))
975 search.append(n) # schedule branch range for scanning
975 search.append(n) # schedule branch range for scanning
976 seenbranch[n] = 1
976 seenbranch[n] = 1
977 else:
977 else:
978 if n[1] not in seen and n[1] not in fetch:
978 if n[1] not in seen and n[1] not in fetch:
979 if n[2] in m and n[3] in m:
979 if n[2] in m and n[3] in m:
980 self.ui.debug(_("found new changeset %s\n") %
980 self.ui.debug(_("found new changeset %s\n") %
981 short(n[1]))
981 short(n[1]))
982 fetch[n[1]] = 1 # earliest unknown
982 fetch[n[1]] = 1 # earliest unknown
983 for p in n[2:4]:
983 for p in n[2:4]:
984 if p in m:
984 if p in m:
985 base[p] = 1 # latest known
985 base[p] = 1 # latest known
986
986
987 for p in n[2:4]:
987 for p in n[2:4]:
988 if p not in req and p not in m:
988 if p not in req and p not in m:
989 r.append(p)
989 r.append(p)
990 req[p] = 1
990 req[p] = 1
991 seen[n[0]] = 1
991 seen[n[0]] = 1
992
992
993 if r:
993 if r:
994 reqcnt += 1
994 reqcnt += 1
995 self.ui.debug(_("request %d: %s\n") %
995 self.ui.debug(_("request %d: %s\n") %
996 (reqcnt, " ".join(map(short, r))))
996 (reqcnt, " ".join(map(short, r))))
997 for p in range(0, len(r), 10):
997 for p in range(0, len(r), 10):
998 for b in remote.branches(r[p:p+10]):
998 for b in remote.branches(r[p:p+10]):
999 self.ui.debug(_("received %s:%s\n") %
999 self.ui.debug(_("received %s:%s\n") %
1000 (short(b[0]), short(b[1])))
1000 (short(b[0]), short(b[1])))
1001 unknown.append(b)
1001 unknown.append(b)
1002
1002
1003 # do binary search on the branches we found
1003 # do binary search on the branches we found
1004 while search:
1004 while search:
1005 n = search.pop(0)
1005 n = search.pop(0)
1006 reqcnt += 1
1006 reqcnt += 1
1007 l = remote.between([(n[0], n[1])])[0]
1007 l = remote.between([(n[0], n[1])])[0]
1008 l.append(n[1])
1008 l.append(n[1])
1009 p = n[0]
1009 p = n[0]
1010 f = 1
1010 f = 1
1011 for i in l:
1011 for i in l:
1012 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1012 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1013 if i in m:
1013 if i in m:
1014 if f <= 2:
1014 if f <= 2:
1015 self.ui.debug(_("found new branch changeset %s\n") %
1015 self.ui.debug(_("found new branch changeset %s\n") %
1016 short(p))
1016 short(p))
1017 fetch[p] = 1
1017 fetch[p] = 1
1018 base[i] = 1
1018 base[i] = 1
1019 else:
1019 else:
1020 self.ui.debug(_("narrowed branch search to %s:%s\n")
1020 self.ui.debug(_("narrowed branch search to %s:%s\n")
1021 % (short(p), short(i)))
1021 % (short(p), short(i)))
1022 search.append((p, i))
1022 search.append((p, i))
1023 break
1023 break
1024 p, f = i, f * 2
1024 p, f = i, f * 2
1025
1025
1026 # sanity check our fetch list
1026 # sanity check our fetch list
1027 for f in fetch.keys():
1027 for f in fetch.keys():
1028 if f in m:
1028 if f in m:
1029 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1029 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1030
1030
1031 if base.keys() == [nullid]:
1031 if base.keys() == [nullid]:
1032 if force:
1032 if force:
1033 self.ui.warn(_("warning: repository is unrelated\n"))
1033 self.ui.warn(_("warning: repository is unrelated\n"))
1034 else:
1034 else:
1035 raise util.Abort(_("repository is unrelated"))
1035 raise util.Abort(_("repository is unrelated"))
1036
1036
1037 self.ui.note(_("found new changesets starting at ") +
1037 self.ui.note(_("found new changesets starting at ") +
1038 " ".join([short(f) for f in fetch]) + "\n")
1038 " ".join([short(f) for f in fetch]) + "\n")
1039
1039
1040 self.ui.debug(_("%d total queries\n") % reqcnt)
1040 self.ui.debug(_("%d total queries\n") % reqcnt)
1041
1041
1042 return fetch.keys()
1042 return fetch.keys()
1043
1043
1044 def findoutgoing(self, remote, base=None, heads=None, force=False):
1044 def findoutgoing(self, remote, base=None, heads=None, force=False):
1045 """Return list of nodes that are roots of subsets not in remote
1045 """Return list of nodes that are roots of subsets not in remote
1046
1046
1047 If base dict is specified, assume that these nodes and their parents
1047 If base dict is specified, assume that these nodes and their parents
1048 exist on the remote side.
1048 exist on the remote side.
1049 If a list of heads is specified, return only nodes which are heads
1049 If a list of heads is specified, return only nodes which are heads
1050 or ancestors of these heads, and return a second element which
1050 or ancestors of these heads, and return a second element which
1051 contains all remote heads which get new children.
1051 contains all remote heads which get new children.
1052 """
1052 """
1053 if base == None:
1053 if base == None:
1054 base = {}
1054 base = {}
1055 self.findincoming(remote, base, heads, force=force)
1055 self.findincoming(remote, base, heads, force=force)
1056
1056
1057 self.ui.debug(_("common changesets up to ")
1057 self.ui.debug(_("common changesets up to ")
1058 + " ".join(map(short, base.keys())) + "\n")
1058 + " ".join(map(short, base.keys())) + "\n")
1059
1059
1060 remain = dict.fromkeys(self.changelog.nodemap)
1060 remain = dict.fromkeys(self.changelog.nodemap)
1061
1061
1062 # prune everything remote has from the tree
1062 # prune everything remote has from the tree
1063 del remain[nullid]
1063 del remain[nullid]
1064 remove = base.keys()
1064 remove = base.keys()
1065 while remove:
1065 while remove:
1066 n = remove.pop(0)
1066 n = remove.pop(0)
1067 if n in remain:
1067 if n in remain:
1068 del remain[n]
1068 del remain[n]
1069 for p in self.changelog.parents(n):
1069 for p in self.changelog.parents(n):
1070 remove.append(p)
1070 remove.append(p)
1071
1071
1072 # find every node whose parents have been pruned
1072 # find every node whose parents have been pruned
1073 subset = []
1073 subset = []
1074 # find every remote head that will get new children
1074 # find every remote head that will get new children
1075 updated_heads = {}
1075 updated_heads = {}
1076 for n in remain:
1076 for n in remain:
1077 p1, p2 = self.changelog.parents(n)
1077 p1, p2 = self.changelog.parents(n)
1078 if p1 not in remain and p2 not in remain:
1078 if p1 not in remain and p2 not in remain:
1079 subset.append(n)
1079 subset.append(n)
1080 if heads:
1080 if heads:
1081 if p1 in heads:
1081 if p1 in heads:
1082 updated_heads[p1] = True
1082 updated_heads[p1] = True
1083 if p2 in heads:
1083 if p2 in heads:
1084 updated_heads[p2] = True
1084 updated_heads[p2] = True
1085
1085
1086 # this is the set of all roots we have to push
1086 # this is the set of all roots we have to push
1087 if heads:
1087 if heads:
1088 return subset, updated_heads.keys()
1088 return subset, updated_heads.keys()
1089 else:
1089 else:
1090 return subset
1090 return subset
1091
1091
1092 def pull(self, remote, heads=None, force=False):
1092 def pull(self, remote, heads=None, force=False):
1093 l = self.lock()
1093 l = self.lock()
1094
1094
1095 fetch = self.findincoming(remote, force=force)
1095 fetch = self.findincoming(remote, force=force)
1096 if fetch == [nullid]:
1096 if fetch == [nullid]:
1097 self.ui.status(_("requesting all changes\n"))
1097 self.ui.status(_("requesting all changes\n"))
1098
1098
1099 if not fetch:
1099 if not fetch:
1100 self.ui.status(_("no changes found\n"))
1100 self.ui.status(_("no changes found\n"))
1101 return 0
1101 return 0
1102
1102
1103 if heads is None:
1103 if heads is None:
1104 cg = remote.changegroup(fetch, 'pull')
1104 cg = remote.changegroup(fetch, 'pull')
1105 else:
1105 else:
1106 cg = remote.changegroupsubset(fetch, heads, 'pull')
1106 cg = remote.changegroupsubset(fetch, heads, 'pull')
1107 return self.addchangegroup(cg, 'pull')
1107 return self.addchangegroup(cg, 'pull')
1108
1108
1109 def push(self, remote, force=False, revs=None):
1109 def push(self, remote, force=False, revs=None):
1110 # there are two ways to push to remote repo:
1110 # there are two ways to push to remote repo:
1111 #
1111 #
1112 # addchangegroup assumes local user can lock remote
1112 # addchangegroup assumes local user can lock remote
1113 # repo (local filesystem, old ssh servers).
1113 # repo (local filesystem, old ssh servers).
1114 #
1114 #
1115 # unbundle assumes local user cannot lock remote repo (new ssh
1115 # unbundle assumes local user cannot lock remote repo (new ssh
1116 # servers, http servers).
1116 # servers, http servers).
1117
1117
1118 if 'unbundle' in remote.capabilities:
1118 if 'unbundle' in remote.capabilities:
1119 self.push_unbundle(remote, force, revs)
1119 self.push_unbundle(remote, force, revs)
1120 else:
1120 else:
1121 self.push_addchangegroup(remote, force, revs)
1121 self.push_addchangegroup(remote, force, revs)
1122
1122
1123 def prepush(self, remote, force, revs):
1123 def prepush(self, remote, force, revs):
1124 base = {}
1124 base = {}
1125 remote_heads = remote.heads()
1125 remote_heads = remote.heads()
1126 inc = self.findincoming(remote, base, remote_heads, force=force)
1126 inc = self.findincoming(remote, base, remote_heads, force=force)
1127 if not force and inc:
1127 if not force and inc:
1128 self.ui.warn(_("abort: unsynced remote changes!\n"))
1128 self.ui.warn(_("abort: unsynced remote changes!\n"))
1129 self.ui.status(_("(did you forget to sync?"
1129 self.ui.status(_("(did you forget to sync?"
1130 " use push -f to force)\n"))
1130 " use push -f to force)\n"))
1131 return None, 1
1131 return None, 1
1132
1132
1133 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1133 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1134 if revs is not None:
1134 if revs is not None:
1135 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1135 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1136 else:
1136 else:
1137 bases, heads = update, self.changelog.heads()
1137 bases, heads = update, self.changelog.heads()
1138
1138
1139 if not bases:
1139 if not bases:
1140 self.ui.status(_("no changes found\n"))
1140 self.ui.status(_("no changes found\n"))
1141 return None, 1
1141 return None, 1
1142 elif not force:
1142 elif not force:
1143 # FIXME we don't properly detect creation of new heads
1143 # FIXME we don't properly detect creation of new heads
1144 # in the push -r case, assume the user knows what he's doing
1144 # in the push -r case, assume the user knows what he's doing
1145 if not revs and len(remote_heads) < len(heads) \
1145 if not revs and len(remote_heads) < len(heads) \
1146 and remote_heads != [nullid]:
1146 and remote_heads != [nullid]:
1147 self.ui.warn(_("abort: push creates new remote branches!\n"))
1147 self.ui.warn(_("abort: push creates new remote branches!\n"))
1148 self.ui.status(_("(did you forget to merge?"
1148 self.ui.status(_("(did you forget to merge?"
1149 " use push -f to force)\n"))
1149 " use push -f to force)\n"))
1150 return None, 1
1150 return None, 1
1151
1151
1152 if revs is None:
1152 if revs is None:
1153 cg = self.changegroup(update, 'push')
1153 cg = self.changegroup(update, 'push')
1154 else:
1154 else:
1155 cg = self.changegroupsubset(update, revs, 'push')
1155 cg = self.changegroupsubset(update, revs, 'push')
1156 return cg, remote_heads
1156 return cg, remote_heads
1157
1157
1158 def push_addchangegroup(self, remote, force, revs):
1158 def push_addchangegroup(self, remote, force, revs):
1159 lock = remote.lock()
1159 lock = remote.lock()
1160
1160
1161 ret = self.prepush(remote, force, revs)
1161 ret = self.prepush(remote, force, revs)
1162 if ret[0] is not None:
1162 if ret[0] is not None:
1163 cg, remote_heads = ret
1163 cg, remote_heads = ret
1164 return remote.addchangegroup(cg, 'push')
1164 return remote.addchangegroup(cg, 'push')
1165 return ret[1]
1165 return ret[1]
1166
1166
1167 def push_unbundle(self, remote, force, revs):
1167 def push_unbundle(self, remote, force, revs):
1168 # local repo finds heads on server, finds out what revs it
1168 # local repo finds heads on server, finds out what revs it
1169 # must push. once revs transferred, if server finds it has
1169 # must push. once revs transferred, if server finds it has
1170 # different heads (someone else won commit/push race), server
1170 # different heads (someone else won commit/push race), server
1171 # aborts.
1171 # aborts.
1172
1172
1173 ret = self.prepush(remote, force, revs)
1173 ret = self.prepush(remote, force, revs)
1174 if ret[0] is not None:
1174 if ret[0] is not None:
1175 cg, remote_heads = ret
1175 cg, remote_heads = ret
1176 if force: remote_heads = ['force']
1176 if force: remote_heads = ['force']
1177 return remote.unbundle(cg, remote_heads, 'push')
1177 return remote.unbundle(cg, remote_heads, 'push')
1178 return ret[1]
1178 return ret[1]
1179
1179
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer streaming the changegroup lazily."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                # Everything transitively reachable is known to the recipient;
                # drop it from the missing set.
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (no 'nonlocal' in this Python version).
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1450
1450
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer streaming the changegroup lazily."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All descendants of basenodes are going out.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Ersatz set of the changelog revision numbers being sent; used to
        # select the matching manifest/file revisions via their linkrevs.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # A changeset's owning changenode is itself.
        def identity(x):
            return x

        # Yield the nodes of a revlog whose linkrev is part of this push,
        # in revision (storage) order.
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # Closure factory: records every file touched by each outgoing
        # changeset into changedfileset as the changelog group is generated.
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # Closure factory: map a revlog node to its owning changenode.
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        # Stream the group: changelog first, then manifests, then one
        # chunk-group per changed file, then the closing chunk.
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                # Materialize so we can test emptiness before emitting the
                # filename chunk.
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1516
1516
1517 def addchangegroup(self, source, srctype):
1517 def addchangegroup(self, source, srctype):
1518 """add changegroup to repo.
1518 """add changegroup to repo.
1519 returns number of heads modified or added + 1."""
1519 returns number of heads modified or added + 1."""
1520
1520
1521 def csmap(x):
1521 def csmap(x):
1522 self.ui.debug(_("add changeset %s\n") % short(x))
1522 self.ui.debug(_("add changeset %s\n") % short(x))
1523 return cl.count()
1523 return cl.count()
1524
1524
1525 def revmap(x):
1525 def revmap(x):
1526 return cl.rev(x)
1526 return cl.rev(x)
1527
1527
1528 if not source:
1528 if not source:
1529 return 0
1529 return 0
1530
1530
1531 self.hook('prechangegroup', throw=True, source=srctype)
1531 self.hook('prechangegroup', throw=True, source=srctype)
1532
1532
1533 changesets = files = revisions = 0
1533 changesets = files = revisions = 0
1534
1534
1535 tr = self.transaction()
1535 tr = self.transaction()
1536
1536
1537 # write changelog data to temp files so concurrent readers will not see
1537 # write changelog data to temp files so concurrent readers will not see
1538 # inconsistent view
1538 # inconsistent view
1539 cl = None
1539 cl = None
1540 try:
1540 try:
1541 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1541 cl = appendfile.appendchangelog(self.opener, self.changelog.version)
1542
1542
1543 oldheads = len(cl.heads())
1543 oldheads = len(cl.heads())
1544
1544
1545 # pull off the changeset group
1545 # pull off the changeset group
1546 self.ui.status(_("adding changesets\n"))
1546 self.ui.status(_("adding changesets\n"))
1547 cor = cl.count() - 1
1547 cor = cl.count() - 1
1548 chunkiter = changegroup.chunkiter(source)
1548 chunkiter = changegroup.chunkiter(source)
1549 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1549 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1550 raise util.Abort(_("received changelog group is empty"))
1550 raise util.Abort(_("received changelog group is empty"))
1551 cnr = cl.count() - 1
1551 cnr = cl.count() - 1
1552 changesets = cnr - cor
1552 changesets = cnr - cor
1553
1553
1554 # pull off the manifest group
1554 # pull off the manifest group
1555 self.ui.status(_("adding manifests\n"))
1555 self.ui.status(_("adding manifests\n"))
1556 chunkiter = changegroup.chunkiter(source)
1556 chunkiter = changegroup.chunkiter(source)
1557 # no need to check for empty manifest group here:
1557 # no need to check for empty manifest group here:
1558 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1558 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1559 # no new manifest will be created and the manifest group will
1559 # no new manifest will be created and the manifest group will
1560 # be empty during the pull
1560 # be empty during the pull
1561 self.manifest.addgroup(chunkiter, revmap, tr)
1561 self.manifest.addgroup(chunkiter, revmap, tr)
1562
1562
1563 # process the files
1563 # process the files
1564 self.ui.status(_("adding file changes\n"))
1564 self.ui.status(_("adding file changes\n"))
1565 while 1:
1565 while 1:
1566 f = changegroup.getchunk(source)
1566 f = changegroup.getchunk(source)
1567 if not f:
1567 if not f:
1568 break
1568 break
1569 self.ui.debug(_("adding %s revisions\n") % f)
1569 self.ui.debug(_("adding %s revisions\n") % f)
1570 fl = self.file(f)
1570 fl = self.file(f)
1571 o = fl.count()
1571 o = fl.count()
1572 chunkiter = changegroup.chunkiter(source)
1572 chunkiter = changegroup.chunkiter(source)
1573 if fl.addgroup(chunkiter, revmap, tr) is None:
1573 if fl.addgroup(chunkiter, revmap, tr) is None:
1574 raise util.Abort(_("received file revlog group is empty"))
1574 raise util.Abort(_("received file revlog group is empty"))
1575 revisions += fl.count() - o
1575 revisions += fl.count() - o
1576 files += 1
1576 files += 1
1577
1577
1578 cl.writedata()
1578 cl.writedata()
1579 finally:
1579 finally:
1580 if cl:
1580 if cl:
1581 cl.cleanup()
1581 cl.cleanup()
1582
1582
1583 # make changelog see real files again
1583 # make changelog see real files again
1584 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1584 self.changelog = changelog.changelog(self.opener, self.changelog.version)
1585 self.changelog.checkinlinesize(tr)
1585 self.changelog.checkinlinesize(tr)
1586
1586
1587 newheads = len(self.changelog.heads())
1587 newheads = len(self.changelog.heads())
1588 heads = ""
1588 heads = ""
1589 if oldheads and newheads != oldheads:
1589 if oldheads and newheads != oldheads:
1590 heads = _(" (%+d heads)") % (newheads - oldheads)
1590 heads = _(" (%+d heads)") % (newheads - oldheads)
1591
1591
1592 self.ui.status(_("added %d changesets"
1592 self.ui.status(_("added %d changesets"
1593 " with %d changes to %d files%s\n")
1593 " with %d changes to %d files%s\n")
1594 % (changesets, revisions, files, heads))
1594 % (changesets, revisions, files, heads))
1595
1595
1596 if changesets > 0:
1596 if changesets > 0:
1597 self.hook('pretxnchangegroup', throw=True,
1597 self.hook('pretxnchangegroup', throw=True,
1598 node=hex(self.changelog.node(cor+1)), source=srctype)
1598 node=hex(self.changelog.node(cor+1)), source=srctype)
1599
1599
1600 tr.close()
1600 tr.close()
1601
1601
1602 if changesets > 0:
1602 if changesets > 0:
1603 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1603 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1604 source=srctype)
1604 source=srctype)
1605
1605
1606 for i in range(cor + 1, cnr + 1):
1606 for i in range(cor + 1, cnr + 1):
1607 self.hook("incoming", node=hex(self.changelog.node(i)),
1607 self.hook("incoming", node=hex(self.changelog.node(i)),
1608 source=srctype)
1608 source=srctype)
1609
1609
1610 return newheads - oldheads + 1
1610 return newheads - oldheads + 1
1611
1611
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True, forcemerge=False, wlock=None, show_stats=True):
        """Update the working directory to changeset `node`.

        allow      -- permit a branch merge (two parents afterwards)
        force      -- clobber local changes / force the update
        choose     -- optional predicate limiting which files are touched
        moddirstate -- whether to record results in the dirstate
        forcemerge -- merge even with outstanding uncommitted changes
        wlock      -- pre-acquired working-dir lock (taken here if absent
                      and moddirstate is set)
        show_stats -- print the updated/merged/removed/unresolved summary

        Returns an error indicator (truthy when any file merge failed, or
        1 when an update spanning branches is aborted).
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            raise util.Abort(_("outstanding uncommitted merges"))

        err = False

        # p1: current working-dir parent; p2: the requested target
        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        # m2 is mutated below (entries deleted as they are handled), so copy
        m2 = self.manifest.read(m2n).copy()
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        modified, added, removed, deleted, unknown = self.changes()

        # is this a jump, or a merge?  i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        if allow and linear_path:
            raise util.Abort(_("there is nothing to merge, "
                               "just use 'hg update'"))
        if allow and not forcemerge:
            if modified or added or removed:
                raise util.Abort(_("outstanding uncommitted changes"))

        # refuse to clobber untracked files that differ from the target
        if not forcemerge and not force:
            for f in unknown:
                if f in m2:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) != 0:
                        raise util.Abort(_("'%s' already exists in the working"
                                           " dir and differs from remote") % f)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note(_("resolving manifests\n"))
        self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(_(" ancestor %s local %s remote %s\n") %
                      (short(man), short(m1n), short(m2n)))

        merge = {}   # files needing a 3-way merge: f -> (my node, other node, mode)
        get = {}     # files to fetch from the target: f -> node
        remove = []  # files to delete from the working dir

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(unknown)

        for f in added + modified + unknown:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        if moddirstate and not wlock:
            wlock = self.wlock()

        for f in deleted + removed:
            if f in mw:
                del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f):
                continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wread(f)
                    t2 = self.file(f).read(m2[f])
                    if cmp(t1, t2) == 0:
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(_(" %s versions differ, resolve\n") % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(_(" remote %s is newer, get\n") % f)
                        get[f] = m2[f]
                        s = 1
                elif f in umap or f in added:
                    # this unknown file is the same as the checkout
                    # we need to reset the dirstate if the file was added
                    get[f] = m2[f]

                # files with equal content but differing exec bits
                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(_(" updating permissions for %s\n") % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(_(" updating permissions for %s\n")
                                          % f)
                            util.set_exec(self.wjoin(f), mode)
                # handled: anything left in m2 afterwards only exists remotely
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = _("d")
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (_(" local changed %s which remote deleted\n") % f) +
                            _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                    if r == _("d"):
                        remove.append(f)
                else:
                    self.ui.debug(_("other deleted %s\n") % f)
                    remove.append(f) # other deleted it
            else:
                # file is created on branch or in working directory
                if force and f not in umap:
                    self.ui.debug(_("remote deleted %s, clobbering\n") % f)
                    remove.append(f)
                elif n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug(_("remote deleted %s\n") % f)
                        remove.append(f)
                    else:
                        self.ui.debug(_("local modified %s, keeping\n") % f)
                else:
                    self.ui.debug(_("working dir created %s, keeping\n") % f)

        # whatever survives in m2 exists only in the target revision
        for f, n in m2.iteritems():
            if choose and not choose(f):
                continue
            if f[0] == "/":
                continue
            if f in ma and n != ma[f]:
                r = _("k")
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        (_("remote changed %s which local deleted\n") % f) +
                        _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
                if r == _("k"):
                    get[f] = n
            elif f not in ma:
                self.ui.debug(_("remote created %s\n") % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug(_("local deleted %s, recreating\n") % f)
                    get[f] = n
                else:
                    self.ui.debug(_("local deleted %s\n") % f)

        del mw, m1, m2, ma

        if force:
            # forced update: take the remote side of every would-be merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            branch_merge = False
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status(_("this update spans a branch"
                                 " affecting the following files:\n"))
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge:
                        cf = _(" (resolve)")
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn(_("aborting update spanning branches!\n"))
                self.ui.status(_("(use 'hg merge' to merge across branches"
                                 " or 'hg update -C' to lose changes)\n"))
                return 1
            branch_merge = True

        xp1 = hex(p1)
        xp2 = hex(p2)
        if p2 == nullid: xxp2 = ''
        else: xxp2 = xp2

        self.hook('preupdate', throw=True, parent1=xp1, parent2=xxp2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/":
                continue
            self.ui.note(_("getting %s\n") % f)
            t = self.file(f).read(get[f])
            self.wwrite(f, t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                if branch_merge:
                    # st_mtime=-1 forces a future content comparison
                    self.dirstate.update([f], 'n', st_mtime=-1)
                else:
                    self.dirstate.update([f], 'n')

        # merge the tricky bits
        failedmerge = []
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status(_("merging %s\n") % f)
            my, other, flag = merge[f]
            ret = self.merge3(f, my, other, xp1, xp2)
            if ret:
                err = True
                failedmerge.append(f)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if branch_merge:
                    # We've done a branch merge, mark this file as merged
                    # so that we properly record the merger later
                    self.dirstate.update([f], 'm')
                else:
                    # We've update-merged a locally modified file, so
                    # we set the dirstate to emulate a normal checkout
                    # of that file some time in the past. Thus our
                    # merge will appear as a normal local file
                    # modification.
                    f_len = len(self.file(f).read(other))
                    self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)

        remove.sort()
        for f in remove:
            self.ui.note(_("removing %s\n") % f)
            util.audit_path(f)
            try:
                util.unlink(self.wjoin(f))
            except OSError, inst:
                # already gone is fine; anything else is worth a warning
                if inst.errno != errno.ENOENT:
                    self.ui.warn(_("update failed to remove %s: %s!\n") %
                                 (f, inst.strerror))
        if moddirstate:
            if branch_merge:
                self.dirstate.update(remove, 'r')
            else:
                self.dirstate.forget(remove)

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        if show_stats:
            stats = ((len(get), _("updated")),
                     (len(merge) - len(failedmerge), _("merged")),
                     (len(remove), _("removed")),
                     (len(failedmerge), _("unresolved")))
            note = ", ".join([_("%d files %s") % s for s in stats])
            self.ui.status("%s\n" % note)
        if moddirstate:
            if branch_merge:
                if failedmerge:
                    self.ui.status(_("There are unresolved merges,"
                                     " you can redo the full merge using:\n"
                                     "  hg update -C %s\n"
                                     "  hg merge %s\n"
                                     % (self.changelog.rev(p1),
                                        self.changelog.rev(p2))))
                else:
                    self.ui.status(_("(branch merge, don't forget to commit)\n"))
            elif failedmerge:
                self.ui.status(_("There are unresolved merges with"
                                 " locally modified files.\n"))

        self.hook('update', parent1=xp1, parent2=xxp2, error=int(err))
        return err
1910
1910
1911 def merge3(self, fn, my, other, p1, p2):
1911 def merge3(self, fn, my, other, p1, p2):
1912 """perform a 3-way merge in the working directory"""
1912 """perform a 3-way merge in the working directory"""
1913
1913
1914 def temp(prefix, node):
1914 def temp(prefix, node):
1915 pre = "%s~%s." % (os.path.basename(fn), prefix)
1915 pre = "%s~%s." % (os.path.basename(fn), prefix)
1916 (fd, name) = tempfile.mkstemp(prefix=pre)
1916 (fd, name) = tempfile.mkstemp(prefix=pre)
1917 f = os.fdopen(fd, "wb")
1917 f = os.fdopen(fd, "wb")
1918 self.wwrite(fn, fl.read(node), f)
1918 self.wwrite(fn, fl.read(node), f)
1919 f.close()
1919 f.close()
1920 return name
1920 return name
1921
1921
1922 fl = self.file(fn)
1922 fl = self.file(fn)
1923 base = fl.ancestor(my, other)
1923 base = fl.ancestor(my, other)
1924 a = self.wjoin(fn)
1924 a = self.wjoin(fn)
1925 b = temp("base", base)
1925 b = temp("base", base)
1926 c = temp("other", other)
1926 c = temp("other", other)
1927
1927
1928 self.ui.note(_("resolving %s\n") % fn)
1928 self.ui.note(_("resolving %s\n") % fn)
1929 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1929 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1930 (fn, short(my), short(other), short(base)))
1930 (fn, short(my), short(other), short(base)))
1931
1931
1932 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1932 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1933 or "hgmerge")
1933 or "hgmerge")
1934 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1934 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1935 environ={'HG_FILE': fn,
1935 environ={'HG_FILE': fn,
1936 'HG_MY_NODE': p1,
1936 'HG_MY_NODE': p1,
1937 'HG_OTHER_NODE': p2,
1937 'HG_OTHER_NODE': p2,
1938 'HG_FILE_MY_NODE': hex(my),
1938 'HG_FILE_MY_NODE': hex(my),
1939 'HG_FILE_OTHER_NODE': hex(other),
1939 'HG_FILE_OTHER_NODE': hex(other),
1940 'HG_FILE_BASE_NODE': hex(base)})
1940 'HG_FILE_BASE_NODE': hex(base)})
1941 if r:
1941 if r:
1942 self.ui.warn(_("merging %s failed!\n") % fn)
1942 self.ui.warn(_("merging %s failed!\n") % fn)
1943
1943
1944 os.unlink(b)
1944 os.unlink(b)
1945 os.unlink(c)
1945 os.unlink(c)
1946 return r
1946 return r
1947
1947
1948 def verify(self):
1948 def verify(self):
1949 filelinkrevs = {}
1949 filelinkrevs = {}
1950 filenodes = {}
1950 filenodes = {}
1951 changesets = revisions = files = 0
1951 changesets = revisions = files = 0
1952 errors = [0]
1952 errors = [0]
1953 warnings = [0]
1953 warnings = [0]
1954 neededmanifests = {}
1954 neededmanifests = {}
1955
1955
1956 def err(msg):
1956 def err(msg):
1957 self.ui.warn(msg + "\n")
1957 self.ui.warn(msg + "\n")
1958 errors[0] += 1
1958 errors[0] += 1
1959
1959
1960 def warn(msg):
1960 def warn(msg):
1961 self.ui.warn(msg + "\n")
1961 self.ui.warn(msg + "\n")
1962 warnings[0] += 1
1962 warnings[0] += 1
1963
1963
1964 def checksize(obj, name):
1964 def checksize(obj, name):
1965 d = obj.checksize()
1965 d = obj.checksize()
1966 if d[0]:
1966 if d[0]:
1967 err(_("%s data length off by %d bytes") % (name, d[0]))
1967 err(_("%s data length off by %d bytes") % (name, d[0]))
1968 if d[1]:
1968 if d[1]:
1969 err(_("%s index contains %d extra bytes") % (name, d[1]))
1969 err(_("%s index contains %d extra bytes") % (name, d[1]))
1970
1970
1971 def checkversion(obj, name):
1971 def checkversion(obj, name):
1972 if obj.version != revlog.REVLOGV0:
1972 if obj.version != revlog.REVLOGV0:
1973 if not revlogv1:
1973 if not revlogv1:
1974 warn(_("warning: `%s' uses revlog format 1") % name)
1974 warn(_("warning: `%s' uses revlog format 1") % name)
1975 elif revlogv1:
1975 elif revlogv1:
1976 warn(_("warning: `%s' uses revlog format 0") % name)
1976 warn(_("warning: `%s' uses revlog format 0") % name)
1977
1977
1978 revlogv1 = self.revlogversion != revlog.REVLOGV0
1978 revlogv1 = self.revlogversion != revlog.REVLOGV0
1979 if self.ui.verbose or revlogv1 != self.revlogv1:
1979 if self.ui.verbose or revlogv1 != self.revlogv1:
1980 self.ui.status(_("repository uses revlog format %d\n") %
1980 self.ui.status(_("repository uses revlog format %d\n") %
1981 (revlogv1 and 1 or 0))
1981 (revlogv1 and 1 or 0))
1982
1982
1983 seen = {}
1983 seen = {}
1984 self.ui.status(_("checking changesets\n"))
1984 self.ui.status(_("checking changesets\n"))
1985 checksize(self.changelog, "changelog")
1985 checksize(self.changelog, "changelog")
1986
1986
1987 for i in range(self.changelog.count()):
1987 for i in range(self.changelog.count()):
1988 changesets += 1
1988 changesets += 1
1989 n = self.changelog.node(i)
1989 n = self.changelog.node(i)
1990 l = self.changelog.linkrev(n)
1990 l = self.changelog.linkrev(n)
1991 if l != i:
1991 if l != i:
1992 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1992 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1993 if n in seen:
1993 if n in seen:
1994 err(_("duplicate changeset at revision %d") % i)
1994 err(_("duplicate changeset at revision %d") % i)
1995 seen[n] = 1
1995 seen[n] = 1
1996
1996
1997 for p in self.changelog.parents(n):
1997 for p in self.changelog.parents(n):
1998 if p not in self.changelog.nodemap:
1998 if p not in self.changelog.nodemap:
1999 err(_("changeset %s has unknown parent %s") %
1999 err(_("changeset %s has unknown parent %s") %
2000 (short(n), short(p)))
2000 (short(n), short(p)))
2001 try:
2001 try:
2002 changes = self.changelog.read(n)
2002 changes = self.changelog.read(n)
2003 except KeyboardInterrupt:
2003 except KeyboardInterrupt:
2004 self.ui.warn(_("interrupted"))
2004 self.ui.warn(_("interrupted"))
2005 raise
2005 raise
2006 except Exception, inst:
2006 except Exception, inst:
2007 err(_("unpacking changeset %s: %s") % (short(n), inst))
2007 err(_("unpacking changeset %s: %s") % (short(n), inst))
2008 continue
2008 continue
2009
2009
2010 neededmanifests[changes[0]] = n
2010 neededmanifests[changes[0]] = n
2011
2011
2012 for f in changes[3]:
2012 for f in changes[3]:
2013 filelinkrevs.setdefault(f, []).append(i)
2013 filelinkrevs.setdefault(f, []).append(i)
2014
2014
2015 seen = {}
2015 seen = {}
2016 self.ui.status(_("checking manifests\n"))
2016 self.ui.status(_("checking manifests\n"))
2017 checkversion(self.manifest, "manifest")
2017 checkversion(self.manifest, "manifest")
2018 checksize(self.manifest, "manifest")
2018 checksize(self.manifest, "manifest")
2019
2019
2020 for i in range(self.manifest.count()):
2020 for i in range(self.manifest.count()):
2021 n = self.manifest.node(i)
2021 n = self.manifest.node(i)
2022 l = self.manifest.linkrev(n)
2022 l = self.manifest.linkrev(n)
2023
2023
2024 if l < 0 or l >= self.changelog.count():
2024 if l < 0 or l >= self.changelog.count():
2025 err(_("bad manifest link (%d) at revision %d") % (l, i))
2025 err(_("bad manifest link (%d) at revision %d") % (l, i))
2026
2026
2027 if n in neededmanifests:
2027 if n in neededmanifests:
2028 del neededmanifests[n]
2028 del neededmanifests[n]
2029
2029
2030 if n in seen:
2030 if n in seen:
2031 err(_("duplicate manifest at revision %d") % i)
2031 err(_("duplicate manifest at revision %d") % i)
2032
2032
2033 seen[n] = 1
2033 seen[n] = 1
2034
2034
2035 for p in self.manifest.parents(n):
2035 for p in self.manifest.parents(n):
2036 if p not in self.manifest.nodemap:
2036 if p not in self.manifest.nodemap:
2037 err(_("manifest %s has unknown parent %s") %
2037 err(_("manifest %s has unknown parent %s") %
2038 (short(n), short(p)))
2038 (short(n), short(p)))
2039
2039
2040 try:
2040 try:
2041 delta = mdiff.patchtext(self.manifest.delta(n))
2041 delta = mdiff.patchtext(self.manifest.delta(n))
2042 except KeyboardInterrupt:
2042 except KeyboardInterrupt:
2043 self.ui.warn(_("interrupted"))
2043 self.ui.warn(_("interrupted"))
2044 raise
2044 raise
2045 except Exception, inst:
2045 except Exception, inst:
2046 err(_("unpacking manifest %s: %s") % (short(n), inst))
2046 err(_("unpacking manifest %s: %s") % (short(n), inst))
2047 continue
2047 continue
2048
2048
2049 try:
2049 try:
2050 ff = [ l.split('\0') for l in delta.splitlines() ]
2050 ff = [ l.split('\0') for l in delta.splitlines() ]
2051 for f, fn in ff:
2051 for f, fn in ff:
2052 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2052 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
2053 except (ValueError, TypeError), inst:
2053 except (ValueError, TypeError), inst:
2054 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2054 err(_("broken delta in manifest %s: %s") % (short(n), inst))
2055
2055
2056 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2056 self.ui.status(_("crosschecking files in changesets and manifests\n"))
2057
2057
2058 for m, c in neededmanifests.items():
2058 for m, c in neededmanifests.items():
2059 err(_("Changeset %s refers to unknown manifest %s") %
2059 err(_("Changeset %s refers to unknown manifest %s") %
2060 (short(m), short(c)))
2060 (short(m), short(c)))
2061 del neededmanifests
2061 del neededmanifests
2062
2062
2063 for f in filenodes:
2063 for f in filenodes:
2064 if f not in filelinkrevs:
2064 if f not in filelinkrevs:
2065 err(_("file %s in manifest but not in changesets") % f)
2065 err(_("file %s in manifest but not in changesets") % f)
2066
2066
2067 for f in filelinkrevs:
2067 for f in filelinkrevs:
2068 if f not in filenodes:
2068 if f not in filenodes:
2069 err(_("file %s in changeset but not in manifest") % f)
2069 err(_("file %s in changeset but not in manifest") % f)
2070
2070
2071 self.ui.status(_("checking files\n"))
2071 self.ui.status(_("checking files\n"))
2072 ff = filenodes.keys()
2072 ff = filenodes.keys()
2073 ff.sort()
2073 ff.sort()
2074 for f in ff:
2074 for f in ff:
2075 if f == "/dev/null":
2075 if f == "/dev/null":
2076 continue
2076 continue
2077 files += 1
2077 files += 1
2078 if not f:
2078 if not f:
2079 err(_("file without name in manifest %s") % short(n))
2079 err(_("file without name in manifest %s") % short(n))
2080 continue
2080 continue
2081 fl = self.file(f)
2081 fl = self.file(f)
2082 checkversion(fl, f)
2082 checkversion(fl, f)
2083 checksize(fl, f)
2083 checksize(fl, f)
2084
2084
2085 nodes = {nullid: 1}
2085 nodes = {nullid: 1}
2086 seen = {}
2086 seen = {}
2087 for i in range(fl.count()):
2087 for i in range(fl.count()):
2088 revisions += 1
2088 revisions += 1
2089 n = fl.node(i)
2089 n = fl.node(i)
2090
2090
2091 if n in seen:
2091 if n in seen:
2092 err(_("%s: duplicate revision %d") % (f, i))
2092 err(_("%s: duplicate revision %d") % (f, i))
2093 if n not in filenodes[f]:
2093 if n not in filenodes[f]:
2094 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2094 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
2095 else:
2095 else:
2096 del filenodes[f][n]
2096 del filenodes[f][n]
2097
2097
2098 flr = fl.linkrev(n)
2098 flr = fl.linkrev(n)
2099 if flr not in filelinkrevs.get(f, []):
2099 if flr not in filelinkrevs.get(f, []):
2100 err(_("%s:%s points to unexpected changeset %d")
2100 err(_("%s:%s points to unexpected changeset %d")
2101 % (f, short(n), flr))
2101 % (f, short(n), flr))
2102 else:
2102 else:
2103 filelinkrevs[f].remove(flr)
2103 filelinkrevs[f].remove(flr)
2104
2104
2105 # verify contents
2105 # verify contents
2106 try:
2106 try:
2107 t = fl.read(n)
2107 t = fl.read(n)
2108 except KeyboardInterrupt:
2108 except KeyboardInterrupt:
2109 self.ui.warn(_("interrupted"))
2109 self.ui.warn(_("interrupted"))
2110 raise
2110 raise
2111 except Exception, inst:
2111 except Exception, inst:
2112 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2112 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
2113
2113
2114 # verify parents
2114 # verify parents
2115 (p1, p2) = fl.parents(n)
2115 (p1, p2) = fl.parents(n)
2116 if p1 not in nodes:
2116 if p1 not in nodes:
2117 err(_("file %s:%s unknown parent 1 %s") %
2117 err(_("file %s:%s unknown parent 1 %s") %
2118 (f, short(n), short(p1)))
2118 (f, short(n), short(p1)))
2119 if p2 not in nodes:
2119 if p2 not in nodes:
2120 err(_("file %s:%s unknown parent 2 %s") %
2120 err(_("file %s:%s unknown parent 2 %s") %
2121 (f, short(n), short(p1)))
2121 (f, short(n), short(p1)))
2122 nodes[n] = 1
2122 nodes[n] = 1
2123
2123
2124 # cross-check
2124 # cross-check
2125 for node in filenodes[f]:
2125 for node in filenodes[f]:
2126 err(_("node %s in manifests not in %s") % (hex(node), f))
2126 err(_("node %s in manifests not in %s") % (hex(node), f))
2127
2127
2128 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2128 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
2129 (files, changesets, revisions))
2129 (files, changesets, revisions))
2130
2130
2131 if warnings[0]:
2131 if warnings[0]:
2132 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2132 self.ui.warn(_("%d warnings encountered!\n") % warnings[0])
2133 if errors[0]:
2133 if errors[0]:
2134 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2134 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
2135 return 1
2135 return 1
2136
2136
2137 # used to avoid circular references so destructors work
2137 # used to avoid circular references so destructors work
def aftertrans(base):
    """Return a callback that renames journal files to undo files.

    Closes over the plain path string rather than a repo object so no
    circular reference is created (keeps destructors working).
    """
    def renamer():
        for src, dst in (("journal", "undo"),
                         ("journal.dirstate", "undo.dirstate")):
            util.rename(os.path.join(base, src), os.path.join(base, dst))
    return renamer
2145
2145
@@ -1,900 +1,900 b''
1 """
1 """
2 util.py - Mercurial utility functions and platform specfic implementations
2 util.py - Mercurial utility functions and platform specfic implementations
3
3
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8
8
9 This contains helper routines that are independent of the SCM core and hide
9 This contains helper routines that are independent of the SCM core and hide
10 platform-specific details from the core.
10 platform-specific details from the core.
11 """
11 """
12
12
13 import os, errno
13 import os, errno
14 from i18n import gettext as _
14 from i18n import gettext as _
15 from demandload import *
15 from demandload import *
16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
16 demandload(globals(), "cStringIO errno popen2 re shutil sys tempfile")
17 demandload(globals(), "threading time")
17 demandload(globals(), "threading time")
18
18
class SignalInterrupt(Exception):
    """Raised when the process receives SIGTERM or SIGHUP."""
21
21
22 def pipefilter(s, cmd):
22 def pipefilter(s, cmd):
23 '''filter string S through command CMD, returning its output'''
23 '''filter string S through command CMD, returning its output'''
24 (pout, pin) = popen2.popen2(cmd, -1, 'b')
24 (pout, pin) = popen2.popen2(cmd, -1, 'b')
25 def writer():
25 def writer():
26 try:
26 try:
27 pin.write(s)
27 pin.write(s)
28 pin.close()
28 pin.close()
29 except IOError, inst:
29 except IOError, inst:
30 if inst.errno != errno.EPIPE:
30 if inst.errno != errno.EPIPE:
31 raise
31 raise
32
32
33 # we should use select instead on UNIX, but this will work on most
33 # we should use select instead on UNIX, but this will work on most
34 # systems, including Windows
34 # systems, including Windows
35 w = threading.Thread(target=writer)
35 w = threading.Thread(target=writer)
36 w.start()
36 w.start()
37 f = pout.read()
37 f = pout.read()
38 pout.close()
38 pout.close()
39 w.join()
39 w.join()
40 return f
40 return f
41
41
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        # best-effort cleanup of both temporaries
        try:
            if inname: os.unlink(inname)
        except: pass
        try:
            if outname: os.unlink(outname)
        except: pass
68
68
# maps a command-prefix scheme to the filter implementation for it
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
73
73
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a scheme prefix; default is a plain shell pipe
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
80
80
def find_in_path(name, path, default=None):
    '''find name in search path.  path can be string (will be split
    with os.pathsep), or iterable thing that returns strings.  if name
    found, return path to name.  else return default.'''
    if isinstance(path, str):
        path = path.split(os.pathsep)
    for directory in path:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return default
92
92
def patch(strip, patchname, ui):
    """apply the patch <patchname> to the working directory.
    a list of patched files is returned"""
    # prefer GNU patch when available
    patcher = find_in_path('gpatch', os.environ.get('PATH', ''), 'patch')
    fp = os.popen('%s -p%d < "%s"' % (patcher, strip, patchname))
    patched = {}
    for line in fp:
        line = line.rstrip()
        ui.status("%s\n" % line)
        # patch reports each file it touches as "patching file <name>"
        if line.startswith('patching file '):
            patched.setdefault(parse_patch_output(line), 1)
    code = fp.close()
    if code:
        raise Abort(_("patch command failed: %s") % explain_exit(code)[0])
    return patched.keys()
109
109
def binary(s):
    """return true if a string is binary data using diff's heuristic"""
    # diff's rule: any NUL byte in the first 4k marks the data as binary
    return bool(s) and '\0' in s[:4096]
115
115
def unique(g):
    """yield the elements of iterable g, skipping duplicates"""
    seen = {}
    for item in g:
        if item not in seen:
            seen[item] = 1
            yield item
123
123
class Abort(Exception):
    """Raised if a command needs to print an error and exit."""
126
126
def always(fn):
    # matcher that accepts every file name
    return True

def never(fn):
    # matcher that rejects every file name
    return False
129
129
def patkind(name, dflt_pat='glob'):
    """Split a string into an optional pattern kind prefix and the
    actual pattern."""
    for prefix in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre'):
        if name.startswith(prefix + ':'):
            return name.split(':', 1)
    return dflt_pat, name
136
136
def globre(pat, head='^', tail='$'):
    "convert a glob pattern into a regexp"
    i, n = 0, len(pat)
    out = ''
    group = False
    def peek(): return i < n and pat[i]
    while i < n:
        c = pat[i]
        i += 1
        if c == '*':
            if peek() == '*':
                # '**' crosses directory boundaries
                i += 1
                out += '.*'
            else:
                # '*' stops at a slash
                out += '[^/]*'
        elif c == '?':
            out += '.'
        elif c == '[':
            # scan for the closing bracket of a character class
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' literally
                out += '\\['
            else:
                stuff = pat[i:j].replace('\\', '\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation becomes regex negation
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    stuff = '\\' + stuff
                out = '%s[%s]' % (out, stuff)
        elif c == '{':
            group = True
            out += '(?:'
        elif c == '}' and group:
            out += ')'
            group = False
        elif c == ',' and group:
            out += '|'
        elif c == '\\':
            p = peek()
            if p:
                i += 1
                out += re.escape(p)
            else:
                out += re.escape(c)
        else:
            out += re.escape(c)
    return head + out + tail
188
188
189 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
189 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
190
190
def pathto(n1, n2):
    '''return the relative path from one place to another.
    this returns a path in the form used by the local filesystem, not hg.'''
    if not n1:
        return localpath(n2)
    ours = n1.split('/')
    theirs = n2.split('/')
    ours.reverse()
    theirs.reverse()
    # strip the common prefix (compared from the reversed tails)
    while ours and theirs and ours[-1] == theirs[-1]:
        ours.pop()
        theirs.pop()
    theirs.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join(['..'] * len(ours) + theirs)
203
203
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root"""
    if root == os.sep:
        rootsep = os.sep
    elif root.endswith(os.sep):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if name != rootsep and name.startswith(rootsep):
        # the easy case: name lies textually under root
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows).  For each
        # `name', compare dev/inode numbers.  If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                # reached the filesystem root without finding `root'
                break
            name = dirname

    raise Abort('%s not under root' % myname)
248
248
def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
    # convenience wrapper: build a matcher with glob as the default kind
    return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
251
251
def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
    # on windows the shell does not expand globs for us, so treat bare
    # command-line names as globs there; elsewhere as relative paths
    if os.name == 'nt':
        dflt_pat = 'glob'
    else:
        dflt_pat = 'relpath'
    return _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src)
258
258
def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
    """build a function to match a set of file patterns

    arguments:
    canonroot - the canonical root of the tree you're matching against
    cwd - the current working directory, if relevant
    names - patterns to find
    inc - patterns to include
    exc - patterns to exclude
    head - a regex to prepend to patterns to control whether a match is rooted
    dflt_pat - pattern kind assumed when a name carries no prefix
    src - where the patterns came from, for error reporting

    a pattern is one of:
    'glob:<rooted glob>'
    're:<rooted regexp>'
    'path:<rooted path>'
    'relglob:<relative glob>'
    'relpath:<relative path>'
    'relre:<relative regexp>'
    '<rooted path or regexp>'

    returns:
    a 3-tuple containing
    - list of explicit non-pattern names passed in
    - a bool match(filename) function
    - a bool indicating if any patterns were passed in

    todo:
    make head regex a rooted bool
    """

    def contains_glob(name):
        # true if any glob metacharacter appears in name
        for c in name:
            if c in _globchars:
                return True
        return False

    def regex(kind, name, tail):
        '''convert a pattern into a regular expression'''
        if kind == 're':
            return name
        if kind == 'path':
            return '^' + re.escape(name) + '(?:/|$)'
        if kind == 'relglob':
            return head + globre(name, '(?:|.*/)', tail)
        if kind == 'relpath':
            return head + re.escape(name) + tail
        if kind == 'relre':
            if name.startswith('^'):
                return name
            return '.*' + name
        return head + globre(name, '', tail)

    def matchfn(pats, tail):
        """build a matching function from a set of patterns"""
        if not pats:
            return
        matches = []
        for k, p in pats:
            try:
                pat = '(?:%s)' % regex(k, p, tail)
                matches.append(re.compile(pat).match)
            except re.error:
                if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
                else: raise Abort("invalid pattern (%s): %s" % (k, p))

        def buildfn(text):
            # first successful regex match wins
            for m in matches:
                r = m(text)
                if r:
                    return r

        return buildfn

    def globprefix(pat):
        '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
        root = []
        for p in pat.split(os.sep):
            if contains_glob(p):
                break
            root.append(p)
        return '/'.join(root)

    pats = []
    files = []
    roots = []
    for kind, name in [patkind(p, dflt_pat) for p in names]:
        if kind in ('glob', 'relpath'):
            name = canonpath(canonroot, cwd, name)
            if name == '':
                # the root itself: match everything
                kind, name = 'glob', '**'
        if kind in ('glob', 'path', 're'):
            pats.append((kind, name))
            if kind == 'glob':
                root = globprefix(name)
                if root:
                    roots.append(root)
        elif kind == 'relpath':
            files.append((kind, name))
            roots.append(name)

    patmatch = matchfn(pats, '$') or always
    filematch = matchfn(files, '(?:/|$)') or always
    incmatch = always
    if inc:
        incmatch = matchfn(map(patkind, inc), '(?:/|$)')
    excmatch = lambda fn: False
    if exc:
        excmatch = matchfn(map(patkind, exc), '(?:/|$)')

    return (roots,
            lambda fn: (incmatch(fn) and not excmatch(fn) and
                        (fn.endswith('/') or
                         (not pats and not files) or
                         (pats and patmatch(fn)) or
                         (files and filematch(fn)))),
            (inc or exc or (pats and pats != [('glob', '**')])) and True)
372
372
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status.  if ui object,
    print error message and return status, else raise onerr object as
    exception.'''
    # remember every environment entry we are about to clobber
    oldenv = {}
    for k in environ:
        oldenv[k] = os.environ.get(k)
    if cwd is not None:
        oldcwd = os.getcwd()
    try:
        for k, v in environ.iteritems():
            os.environ[k] = str(v)
        if cwd is not None and oldcwd != cwd:
            os.chdir(cwd)
        rc = os.system(cmd)
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
                                explain_exit(rc)[0])
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            try:
                # duck-type: a ui object has warn(); anything else is an
                # exception class to raise
                onerr.warn(errmsg + '\n')
            except AttributeError:
                raise onerr(errmsg)
        return rc
    finally:
        # restore the environment and working directory we changed
        for k, v in oldenv.iteritems():
            if v is None:
                del os.environ[k]
            else:
                os.environ[k] = v
        if cwd is not None and oldcwd != cwd:
            os.chdir(oldcwd)
409
409
410 def rename(src, dst):
410 def rename(src, dst):
411 """forcibly rename a file"""
411 """forcibly rename a file"""
412 try:
412 try:
413 os.rename(src, dst)
413 os.rename(src, dst)
414 except OSError, err:
414 except OSError, err:
415 # on windows, rename to existing file is not allowed, so we
415 # on windows, rename to existing file is not allowed, so we
416 # must delete destination first. but if file is open, unlink
416 # must delete destination first. but if file is open, unlink
417 # schedules it for delete but does not delete it. rename
417 # schedules it for delete but does not delete it. rename
418 # happens immediately even for open files, so we create
418 # happens immediately even for open files, so we create
419 # temporary file, delete it, rename destination to that name,
419 # temporary file, delete it, rename destination to that name,
420 # then delete that. then rename is safe to do.
420 # then delete that. then rename is safe to do.
421 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
421 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
422 os.close(fd)
422 os.close(fd)
423 os.unlink(temp)
423 os.unlink(temp)
424 os.rename(dst, temp)
424 os.rename(dst, temp)
425 os.unlink(temp)
425 os.unlink(temp)
426 os.rename(src, dst)
426 os.rename(src, dst)
427
427
def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    try:
        # removedirs walks upward, deleting each newly-empty parent
        os.removedirs(os.path.dirname(f))
    except OSError:
        # parent not empty (or already gone) - nothing to prune
        pass
436
436
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        # default to hardlinking when src and dst share a filesystem
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    if os.path.isdir(src):
        os.mkdir(dst)
        for name in os.listdir(src):
            copyfiles(os.path.join(src, name), os.path.join(dst, name),
                      hardlink)
    else:
        if hardlink:
            try:
                os_link(src, dst)
            except (IOError, OSError):
                # linking failed (cross-device, unsupported fs, ...);
                # fall back to plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
459
459
def audit_path(path):
    """Abort if path contains dangerous components"""
    parts = os.path.normcase(path).split(os.sep)
    # reject drive-qualified paths, paths into .hg, absolute/empty first
    # components, and any '..' traversal
    if (os.path.splitdrive(path)[0]
        or parts[0] in ('.hg', '')
        or os.pardir in parts):
        raise Abort(_("path contains illegal component: %s\n") % path)
466
466
467 def _makelock_file(info, pathname):
467 def _makelock_file(info, pathname):
468 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
468 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
469 os.write(ld, info)
469 os.write(ld, info)
470 os.close(ld)
470 os.close(ld)
471
471
def _readlock_file(pathname):
    # posixfile is the platform-appropriate file class chosen below
    fp = posixfile(pathname)
    return fp.read()
474
474
def nlinks(pathname):
    """Return number of hardlinks for the given file.

    Uses lstat so that a symbolic link reports its own link count
    instead of the link count of its target.
    """
    return os.lstat(pathname).st_nlink
478
478
if hasattr(os, 'link'):
    # real hardlink support
    os_link = os.link
else:
    def os_link(src, dst):
        # emulate the failure mode of a platform without link(2)
        raise OSError(0, _("Hardlinks not supported"))
484
484
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # file-like object without a real descriptor; fall back to its name
        return os.stat(fp.name)
    return os.fstat(fd)
491
491
# default file class; replaced with a win32-safe variant further down
posixfile = file
493
493
def is_win_9x():
    '''return true if run on windows 95, 98 or me.'''
    try:
        # platform id 1 == the windows 9x product family
        return sys.getwindowsversion()[3] == 1
    except AttributeError:
        # non-windows python: sniff the 9x command shell instead
        return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
500
500
# Platform specific variants
if os.name == 'nt':
    demandload(globals(), "msvcrt")
    nulldev = 'NUL:'
505
505
506 class winstdout:
506 class winstdout:
507 '''stdout on windows misbehaves if sent through a pipe'''
507 '''stdout on windows misbehaves if sent through a pipe'''
508
508
509 def __init__(self, fp):
509 def __init__(self, fp):
510 self.fp = fp
510 self.fp = fp
511
511
512 def __getattr__(self, key):
512 def __getattr__(self, key):
513 return getattr(self.fp, key)
513 return getattr(self.fp, key)
514
514
515 def close(self):
515 def close(self):
516 try:
516 try:
517 self.fp.close()
517 self.fp.close()
518 except: pass
518 except: pass
519
519
520 def write(self, s):
520 def write(self, s):
521 try:
521 try:
522 return self.fp.write(s)
522 return self.fp.write(s)
523 except IOError, inst:
523 except IOError, inst:
524 if inst.errno != 0: raise
524 if inst.errno != 0: raise
525 self.close()
525 self.close()
526 raise IOError(errno.EPIPE, 'Broken pipe')
526 raise IOError(errno.EPIPE, 'Broken pipe')
527
527
528 sys.stdout = winstdout(sys.stdout)
528 sys.stdout = winstdout(sys.stdout)
529
529
530 def system_rcpath():
530 def system_rcpath():
531 try:
531 try:
532 return system_rcpath_win32()
532 return system_rcpath_win32()
533 except:
533 except:
534 return [r'c:\mercurial\mercurial.ini']
534 return [r'c:\mercurial\mercurial.ini']
535
535
536 def os_rcpath():
536 def os_rcpath():
537 '''return default os-specific hgrc search path'''
537 '''return default os-specific hgrc search path'''
538 path = system_rcpath()
538 path = system_rcpath()
539 path.append(user_rcpath())
539 path.append(user_rcpath())
540 userprofile = os.environ.get('USERPROFILE')
540 userprofile = os.environ.get('USERPROFILE')
541 if userprofile:
541 if userprofile:
542 path.append(os.path.join(userprofile, 'mercurial.ini'))
542 path.append(os.path.join(userprofile, 'mercurial.ini'))
543 return path
543 return path
544
544
545 def user_rcpath():
545 def user_rcpath():
546 '''return os-specific hgrc search path to the user dir'''
546 '''return os-specific hgrc search path to the user dir'''
547 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
547 return os.path.join(os.path.expanduser('~'), 'mercurial.ini')
548
548
549 def parse_patch_output(output_line):
549 def parse_patch_output(output_line):
550 """parses the output produced by patch and returns the file name"""
550 """parses the output produced by patch and returns the file name"""
551 pf = output_line[14:]
551 pf = output_line[14:]
552 if pf[0] == '`':
552 if pf[0] == '`':
553 pf = pf[1:-1] # Remove the quotes
553 pf = pf[1:-1] # Remove the quotes
554 return pf
554 return pf
555
555
556 def testpid(pid):
556 def testpid(pid):
557 '''return False if pid dead, True if running or not known'''
557 '''return False if pid dead, True if running or not known'''
558 return True
558 return True
559
559
560 def is_exec(f, last):
560 def is_exec(f, last):
561 return last
561 return last
562
562
563 def set_exec(f, mode):
563 def set_exec(f, mode):
564 pass
564 pass
565
565
566 def set_binary(fd):
566 def set_binary(fd):
567 msvcrt.setmode(fd.fileno(), os.O_BINARY)
567 msvcrt.setmode(fd.fileno(), os.O_BINARY)
568
568
569 def pconvert(path):
569 def pconvert(path):
570 return path.replace("\\", "/")
570 return path.replace("\\", "/")
571
571
572 def localpath(path):
572 def localpath(path):
573 return path.replace('/', '\\')
573 return path.replace('/', '\\')
574
574
575 def normpath(path):
575 def normpath(path):
576 return pconvert(os.path.normpath(path))
576 return pconvert(os.path.normpath(path))
577
577
578 makelock = _makelock_file
578 makelock = _makelock_file
579 readlock = _readlock_file
579 readlock = _readlock_file
580
580
581 def samestat(s1, s2):
581 def samestat(s1, s2):
582 return False
582 return False
583
583
584 def explain_exit(code):
584 def explain_exit(code):
585 return _("exited with status %d") % code, code
585 return _("exited with status %d") % code, code
586
586
587 try:
587 try:
588 # override functions with win32 versions if possible
588 # override functions with win32 versions if possible
589 from util_win32 import *
589 from util_win32 import *
590 if not is_win_9x():
590 if not is_win_9x():
591 posixfile = posixfile_nt
591 posixfile = posixfile_nt
592 except ImportError:
592 except ImportError:
593 pass
593 pass
594
594
595 else:
595 else:
596 nulldev = '/dev/null'
596 nulldev = '/dev/null'
597
597
598 def rcfiles(path):
598 def rcfiles(path):
599 rcs = [os.path.join(path, 'hgrc')]
599 rcs = [os.path.join(path, 'hgrc')]
600 rcdir = os.path.join(path, 'hgrc.d')
600 rcdir = os.path.join(path, 'hgrc.d')
601 try:
601 try:
602 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
602 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
603 if f.endswith(".rc")])
603 if f.endswith(".rc")])
604 except OSError, inst: pass
604 except OSError, inst: pass
605 return rcs
605 return rcs
606
606
607 def os_rcpath():
607 def os_rcpath():
608 '''return default os-specific hgrc search path'''
608 '''return default os-specific hgrc search path'''
609 path = []
609 path = []
610 # old mod_python does not set sys.argv
610 # old mod_python does not set sys.argv
611 if len(getattr(sys, 'argv', [])) > 0:
611 if len(getattr(sys, 'argv', [])) > 0:
612 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
612 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
613 '/../etc/mercurial'))
613 '/../etc/mercurial'))
614 path.extend(rcfiles('/etc/mercurial'))
614 path.extend(rcfiles('/etc/mercurial'))
615 path.append(os.path.expanduser('~/.hgrc'))
615 path.append(os.path.expanduser('~/.hgrc'))
616 path = [os.path.normpath(f) for f in path]
616 path = [os.path.normpath(f) for f in path]
617 return path
617 return path
618
618
619 def parse_patch_output(output_line):
619 def parse_patch_output(output_line):
620 """parses the output produced by patch and returns the file name"""
620 """parses the output produced by patch and returns the file name"""
621 pf = output_line[14:]
621 pf = output_line[14:]
622 if pf.startswith("'") and pf.endswith("'") and pf.find(" ") >= 0:
622 if pf.startswith("'") and pf.endswith("'") and pf.find(" ") >= 0:
623 pf = pf[1:-1] # Remove the quotes
623 pf = pf[1:-1] # Remove the quotes
624 return pf
624 return pf
625
625
626 def is_exec(f, last):
626 def is_exec(f, last):
627 """check whether a file is executable"""
627 """check whether a file is executable"""
628 return (os.stat(f).st_mode & 0100 != 0)
628 return (os.lstat(f).st_mode & 0100 != 0)
629
629
630 def set_exec(f, mode):
630 def set_exec(f, mode):
631 s = os.stat(f).st_mode
631 s = os.lstat(f).st_mode
632 if (s & 0100 != 0) == mode:
632 if (s & 0100 != 0) == mode:
633 return
633 return
634 if mode:
634 if mode:
635 # Turn on +x for every +r bit when making a file executable
635 # Turn on +x for every +r bit when making a file executable
636 # and obey umask.
636 # and obey umask.
637 umask = os.umask(0)
637 umask = os.umask(0)
638 os.umask(umask)
638 os.umask(umask)
639 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
639 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
640 else:
640 else:
641 os.chmod(f, s & 0666)
641 os.chmod(f, s & 0666)
642
642
643 def set_binary(fd):
643 def set_binary(fd):
644 pass
644 pass
645
645
646 def pconvert(path):
646 def pconvert(path):
647 return path
647 return path
648
648
649 def localpath(path):
649 def localpath(path):
650 return path
650 return path
651
651
652 normpath = os.path.normpath
652 normpath = os.path.normpath
653 samestat = os.path.samestat
653 samestat = os.path.samestat
654
654
655 def makelock(info, pathname):
655 def makelock(info, pathname):
656 try:
656 try:
657 os.symlink(info, pathname)
657 os.symlink(info, pathname)
658 except OSError, why:
658 except OSError, why:
659 if why.errno == errno.EEXIST:
659 if why.errno == errno.EEXIST:
660 raise
660 raise
661 else:
661 else:
662 _makelock_file(info, pathname)
662 _makelock_file(info, pathname)
663
663
664 def readlock(pathname):
664 def readlock(pathname):
665 try:
665 try:
666 return os.readlink(pathname)
666 return os.readlink(pathname)
667 except OSError, why:
667 except OSError, why:
668 if why.errno == errno.EINVAL:
668 if why.errno == errno.EINVAL:
669 return _readlock_file(pathname)
669 return _readlock_file(pathname)
670 else:
670 else:
671 raise
671 raise
672
672
673 def testpid(pid):
673 def testpid(pid):
674 '''return False if pid dead, True if running or not sure'''
674 '''return False if pid dead, True if running or not sure'''
675 try:
675 try:
676 os.kill(pid, 0)
676 os.kill(pid, 0)
677 return True
677 return True
678 except OSError, inst:
678 except OSError, inst:
679 return inst.errno != errno.ESRCH
679 return inst.errno != errno.ESRCH
680
680
681 def explain_exit(code):
681 def explain_exit(code):
682 """return a 2-tuple (desc, code) describing a process's status"""
682 """return a 2-tuple (desc, code) describing a process's status"""
683 if os.WIFEXITED(code):
683 if os.WIFEXITED(code):
684 val = os.WEXITSTATUS(code)
684 val = os.WEXITSTATUS(code)
685 return _("exited with status %d") % val, val
685 return _("exited with status %d") % val, val
686 elif os.WIFSIGNALED(code):
686 elif os.WIFSIGNALED(code):
687 val = os.WTERMSIG(code)
687 val = os.WTERMSIG(code)
688 return _("killed by signal %d") % val, val
688 return _("killed by signal %d") % val, val
689 elif os.WIFSTOPPED(code):
689 elif os.WIFSTOPPED(code):
690 val = os.WSTOPSIG(code)
690 val = os.WSTOPSIG(code)
691 return _("stopped by signal %d") % val, val
691 return _("stopped by signal %d") % val, val
692 raise ValueError(_("invalid exit code"))
692 raise ValueError(_("invalid exit code"))
693
693
694 def opener(base, audit=True):
694 def opener(base, audit=True):
695 """
695 """
696 return a function that opens files relative to base
696 return a function that opens files relative to base
697
697
698 this function is used to hide the details of COW semantics and
698 this function is used to hide the details of COW semantics and
699 remote file access from higher level code.
699 remote file access from higher level code.
700 """
700 """
701 p = base
701 p = base
702 audit_p = audit
702 audit_p = audit
703
703
704 def mktempcopy(name):
704 def mktempcopy(name):
705 d, fn = os.path.split(name)
705 d, fn = os.path.split(name)
706 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
706 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
707 os.close(fd)
707 os.close(fd)
708 ofp = posixfile(temp, "wb")
708 ofp = posixfile(temp, "wb")
709 try:
709 try:
710 try:
710 try:
711 ifp = posixfile(name, "rb")
711 ifp = posixfile(name, "rb")
712 except IOError, inst:
712 except IOError, inst:
713 if not getattr(inst, 'filename', None):
713 if not getattr(inst, 'filename', None):
714 inst.filename = name
714 inst.filename = name
715 raise
715 raise
716 for chunk in filechunkiter(ifp):
716 for chunk in filechunkiter(ifp):
717 ofp.write(chunk)
717 ofp.write(chunk)
718 ifp.close()
718 ifp.close()
719 ofp.close()
719 ofp.close()
720 except:
720 except:
721 try: os.unlink(temp)
721 try: os.unlink(temp)
722 except: pass
722 except: pass
723 raise
723 raise
724 st = os.lstat(name)
724 st = os.lstat(name)
725 os.chmod(temp, st.st_mode)
725 os.chmod(temp, st.st_mode)
726 return temp
726 return temp
727
727
728 class atomictempfile(posixfile):
728 class atomictempfile(posixfile):
729 """the file will only be copied when rename is called"""
729 """the file will only be copied when rename is called"""
730 def __init__(self, name, mode):
730 def __init__(self, name, mode):
731 self.__name = name
731 self.__name = name
732 self.temp = mktempcopy(name)
732 self.temp = mktempcopy(name)
733 posixfile.__init__(self, self.temp, mode)
733 posixfile.__init__(self, self.temp, mode)
734 def rename(self):
734 def rename(self):
735 if not self.closed:
735 if not self.closed:
736 posixfile.close(self)
736 posixfile.close(self)
737 rename(self.temp, localpath(self.__name))
737 rename(self.temp, localpath(self.__name))
738 def __del__(self):
738 def __del__(self):
739 if not self.closed:
739 if not self.closed:
740 try:
740 try:
741 os.unlink(self.temp)
741 os.unlink(self.temp)
742 except: pass
742 except: pass
743 posixfile.close(self)
743 posixfile.close(self)
744
744
745 class atomicfile(atomictempfile):
745 class atomicfile(atomictempfile):
746 """the file will only be copied on close"""
746 """the file will only be copied on close"""
747 def __init__(self, name, mode):
747 def __init__(self, name, mode):
748 atomictempfile.__init__(self, name, mode)
748 atomictempfile.__init__(self, name, mode)
749 def close(self):
749 def close(self):
750 self.rename()
750 self.rename()
751 def __del__(self):
751 def __del__(self):
752 self.rename()
752 self.rename()
753
753
754 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
754 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
755 if audit_p:
755 if audit_p:
756 audit_path(path)
756 audit_path(path)
757 f = os.path.join(p, path)
757 f = os.path.join(p, path)
758
758
759 if not text:
759 if not text:
760 mode += "b" # for that other OS
760 mode += "b" # for that other OS
761
761
762 if mode[0] != "r":
762 if mode[0] != "r":
763 try:
763 try:
764 nlink = nlinks(f)
764 nlink = nlinks(f)
765 except OSError:
765 except OSError:
766 d = os.path.dirname(f)
766 d = os.path.dirname(f)
767 if not os.path.isdir(d):
767 if not os.path.isdir(d):
768 os.makedirs(d)
768 os.makedirs(d)
769 else:
769 else:
770 if atomic:
770 if atomic:
771 return atomicfile(f, mode)
771 return atomicfile(f, mode)
772 elif atomictemp:
772 elif atomictemp:
773 return atomictempfile(f, mode)
773 return atomictempfile(f, mode)
774 if nlink > 1:
774 if nlink > 1:
775 rename(mktempcopy(f), f)
775 rename(mktempcopy(f), f)
776 return posixfile(f, mode)
776 return posixfile(f, mode)
777
777
778 return o
778 return o
779
779
780 class chunkbuffer(object):
780 class chunkbuffer(object):
781 """Allow arbitrary sized chunks of data to be efficiently read from an
781 """Allow arbitrary sized chunks of data to be efficiently read from an
782 iterator over chunks of arbitrary size."""
782 iterator over chunks of arbitrary size."""
783
783
784 def __init__(self, in_iter, targetsize = 2**16):
784 def __init__(self, in_iter, targetsize = 2**16):
785 """in_iter is the iterator that's iterating over the input chunks.
785 """in_iter is the iterator that's iterating over the input chunks.
786 targetsize is how big a buffer to try to maintain."""
786 targetsize is how big a buffer to try to maintain."""
787 self.in_iter = iter(in_iter)
787 self.in_iter = iter(in_iter)
788 self.buf = ''
788 self.buf = ''
789 self.targetsize = int(targetsize)
789 self.targetsize = int(targetsize)
790 if self.targetsize <= 0:
790 if self.targetsize <= 0:
791 raise ValueError(_("targetsize must be greater than 0, was %d") %
791 raise ValueError(_("targetsize must be greater than 0, was %d") %
792 targetsize)
792 targetsize)
793 self.iterempty = False
793 self.iterempty = False
794
794
795 def fillbuf(self):
795 def fillbuf(self):
796 """Ignore target size; read every chunk from iterator until empty."""
796 """Ignore target size; read every chunk from iterator until empty."""
797 if not self.iterempty:
797 if not self.iterempty:
798 collector = cStringIO.StringIO()
798 collector = cStringIO.StringIO()
799 collector.write(self.buf)
799 collector.write(self.buf)
800 for ch in self.in_iter:
800 for ch in self.in_iter:
801 collector.write(ch)
801 collector.write(ch)
802 self.buf = collector.getvalue()
802 self.buf = collector.getvalue()
803 self.iterempty = True
803 self.iterempty = True
804
804
805 def read(self, l):
805 def read(self, l):
806 """Read L bytes of data from the iterator of chunks of data.
806 """Read L bytes of data from the iterator of chunks of data.
807 Returns less than L bytes if the iterator runs dry."""
807 Returns less than L bytes if the iterator runs dry."""
808 if l > len(self.buf) and not self.iterempty:
808 if l > len(self.buf) and not self.iterempty:
809 # Clamp to a multiple of self.targetsize
809 # Clamp to a multiple of self.targetsize
810 targetsize = self.targetsize * ((l // self.targetsize) + 1)
810 targetsize = self.targetsize * ((l // self.targetsize) + 1)
811 collector = cStringIO.StringIO()
811 collector = cStringIO.StringIO()
812 collector.write(self.buf)
812 collector.write(self.buf)
813 collected = len(self.buf)
813 collected = len(self.buf)
814 for chunk in self.in_iter:
814 for chunk in self.in_iter:
815 collector.write(chunk)
815 collector.write(chunk)
816 collected += len(chunk)
816 collected += len(chunk)
817 if collected >= targetsize:
817 if collected >= targetsize:
818 break
818 break
819 if collected < targetsize:
819 if collected < targetsize:
820 self.iterempty = True
820 self.iterempty = True
821 self.buf = collector.getvalue()
821 self.buf = collector.getvalue()
822 s, self.buf = self.buf[:l], buffer(self.buf, l)
822 s, self.buf = self.buf[:l], buffer(self.buf, l)
823 return s
823 return s
824
824
825 def filechunkiter(f, size = 65536):
825 def filechunkiter(f, size = 65536):
826 """Create a generator that produces all the data in the file size
826 """Create a generator that produces all the data in the file size
827 (default 65536) bytes at a time. Chunks may be less than size
827 (default 65536) bytes at a time. Chunks may be less than size
828 bytes if the chunk is the last chunk in the file, or the file is a
828 bytes if the chunk is the last chunk in the file, or the file is a
829 socket or some other type of file that sometimes reads less data
829 socket or some other type of file that sometimes reads less data
830 than is requested."""
830 than is requested."""
831 s = f.read(size)
831 s = f.read(size)
832 while len(s) > 0:
832 while len(s) > 0:
833 yield s
833 yield s
834 s = f.read(size)
834 s = f.read(size)
835
835
836 def makedate():
836 def makedate():
837 lt = time.localtime()
837 lt = time.localtime()
838 if lt[8] == 1 and time.daylight:
838 if lt[8] == 1 and time.daylight:
839 tz = time.altzone
839 tz = time.altzone
840 else:
840 else:
841 tz = time.timezone
841 tz = time.timezone
842 return time.mktime(lt), tz
842 return time.mktime(lt), tz
843
843
844 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
844 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
845 """represent a (unixtime, offset) tuple as a localized time.
845 """represent a (unixtime, offset) tuple as a localized time.
846 unixtime is seconds since the epoch, and offset is the time zone's
846 unixtime is seconds since the epoch, and offset is the time zone's
847 number of seconds away from UTC. if timezone is false, do not
847 number of seconds away from UTC. if timezone is false, do not
848 append time zone to string."""
848 append time zone to string."""
849 t, tz = date or makedate()
849 t, tz = date or makedate()
850 s = time.strftime(format, time.gmtime(float(t) - tz))
850 s = time.strftime(format, time.gmtime(float(t) - tz))
851 if timezone:
851 if timezone:
852 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
852 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
853 return s
853 return s
854
854
855 def shortuser(user):
855 def shortuser(user):
856 """Return a short representation of a user name or email address."""
856 """Return a short representation of a user name or email address."""
857 f = user.find('@')
857 f = user.find('@')
858 if f >= 0:
858 if f >= 0:
859 user = user[:f]
859 user = user[:f]
860 f = user.find('<')
860 f = user.find('<')
861 if f >= 0:
861 if f >= 0:
862 user = user[f+1:]
862 user = user[f+1:]
863 return user
863 return user
864
864
865 def walkrepos(path):
865 def walkrepos(path):
866 '''yield every hg repository under path, recursively.'''
866 '''yield every hg repository under path, recursively.'''
867 def errhandler(err):
867 def errhandler(err):
868 if err.filename == path:
868 if err.filename == path:
869 raise err
869 raise err
870
870
871 for root, dirs, files in os.walk(path, onerror=errhandler):
871 for root, dirs, files in os.walk(path, onerror=errhandler):
872 for d in dirs:
872 for d in dirs:
873 if d == '.hg':
873 if d == '.hg':
874 yield root
874 yield root
875 dirs[:] = []
875 dirs[:] = []
876 break
876 break
877
877
878 _rcpath = None
878 _rcpath = None
879
879
880 def rcpath():
880 def rcpath():
881 '''return hgrc search path. if env var HGRCPATH is set, use it.
881 '''return hgrc search path. if env var HGRCPATH is set, use it.
882 for each item in path, if directory, use files ending in .rc,
882 for each item in path, if directory, use files ending in .rc,
883 else use item.
883 else use item.
884 make HGRCPATH empty to only look in .hg/hgrc of current repo.
884 make HGRCPATH empty to only look in .hg/hgrc of current repo.
885 if no HGRCPATH, use default os-specific path.'''
885 if no HGRCPATH, use default os-specific path.'''
886 global _rcpath
886 global _rcpath
887 if _rcpath is None:
887 if _rcpath is None:
888 if 'HGRCPATH' in os.environ:
888 if 'HGRCPATH' in os.environ:
889 _rcpath = []
889 _rcpath = []
890 for p in os.environ['HGRCPATH'].split(os.pathsep):
890 for p in os.environ['HGRCPATH'].split(os.pathsep):
891 if not p: continue
891 if not p: continue
892 if os.path.isdir(p):
892 if os.path.isdir(p):
893 for f in os.listdir(p):
893 for f in os.listdir(p):
894 if f.endswith('.rc'):
894 if f.endswith('.rc'):
895 _rcpath.append(os.path.join(p, f))
895 _rcpath.append(os.path.join(p, f))
896 else:
896 else:
897 _rcpath.append(p)
897 _rcpath.append(p)
898 else:
898 else:
899 _rcpath = os_rcpath()
899 _rcpath = os_rcpath()
900 return _rcpath
900 return _rcpath
@@ -1,299 +1,299 b''
1 # util_win32.py - utility functions that use win32 API
1 # util_win32.py - utility functions that use win32 API
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of
6 # This software may be used and distributed according to the terms of
7 # the GNU General Public License, incorporated herein by reference.
7 # the GNU General Public License, incorporated herein by reference.
8
8
9 # Mark Hammond's win32all package allows better functionality on
9 # Mark Hammond's win32all package allows better functionality on
10 # Windows. this module overrides definitions in util.py. if not
10 # Windows. this module overrides definitions in util.py. if not
11 # available, import of this module will fail, and generic code will be
11 # available, import of this module will fail, and generic code will be
12 # used.
12 # used.
13
13
14 import win32api
14 import win32api
15
15
16 from demandload import *
16 from demandload import *
17 from i18n import gettext as _
17 from i18n import gettext as _
18 demandload(globals(), 'errno os pywintypes win32con win32file win32process')
18 demandload(globals(), 'errno os pywintypes win32con win32file win32process')
19 demandload(globals(), 'cStringIO win32com.shell:shell,shellcon winerror')
19 demandload(globals(), 'cStringIO win32com.shell:shell,shellcon winerror')
20
20
21 class WinError:
21 class WinError:
22 winerror_map = {
22 winerror_map = {
23 winerror.ERROR_ACCESS_DENIED: errno.EACCES,
23 winerror.ERROR_ACCESS_DENIED: errno.EACCES,
24 winerror.ERROR_ACCOUNT_DISABLED: errno.EACCES,
24 winerror.ERROR_ACCOUNT_DISABLED: errno.EACCES,
25 winerror.ERROR_ACCOUNT_RESTRICTION: errno.EACCES,
25 winerror.ERROR_ACCOUNT_RESTRICTION: errno.EACCES,
26 winerror.ERROR_ALREADY_ASSIGNED: errno.EBUSY,
26 winerror.ERROR_ALREADY_ASSIGNED: errno.EBUSY,
27 winerror.ERROR_ALREADY_EXISTS: errno.EEXIST,
27 winerror.ERROR_ALREADY_EXISTS: errno.EEXIST,
28 winerror.ERROR_ARITHMETIC_OVERFLOW: errno.ERANGE,
28 winerror.ERROR_ARITHMETIC_OVERFLOW: errno.ERANGE,
29 winerror.ERROR_BAD_COMMAND: errno.EIO,
29 winerror.ERROR_BAD_COMMAND: errno.EIO,
30 winerror.ERROR_BAD_DEVICE: errno.ENODEV,
30 winerror.ERROR_BAD_DEVICE: errno.ENODEV,
31 winerror.ERROR_BAD_DRIVER_LEVEL: errno.ENXIO,
31 winerror.ERROR_BAD_DRIVER_LEVEL: errno.ENXIO,
32 winerror.ERROR_BAD_EXE_FORMAT: errno.ENOEXEC,
32 winerror.ERROR_BAD_EXE_FORMAT: errno.ENOEXEC,
33 winerror.ERROR_BAD_FORMAT: errno.ENOEXEC,
33 winerror.ERROR_BAD_FORMAT: errno.ENOEXEC,
34 winerror.ERROR_BAD_LENGTH: errno.EINVAL,
34 winerror.ERROR_BAD_LENGTH: errno.EINVAL,
35 winerror.ERROR_BAD_PATHNAME: errno.ENOENT,
35 winerror.ERROR_BAD_PATHNAME: errno.ENOENT,
36 winerror.ERROR_BAD_PIPE: errno.EPIPE,
36 winerror.ERROR_BAD_PIPE: errno.EPIPE,
37 winerror.ERROR_BAD_UNIT: errno.ENODEV,
37 winerror.ERROR_BAD_UNIT: errno.ENODEV,
38 winerror.ERROR_BAD_USERNAME: errno.EINVAL,
38 winerror.ERROR_BAD_USERNAME: errno.EINVAL,
39 winerror.ERROR_BROKEN_PIPE: errno.EPIPE,
39 winerror.ERROR_BROKEN_PIPE: errno.EPIPE,
40 winerror.ERROR_BUFFER_OVERFLOW: errno.ENAMETOOLONG,
40 winerror.ERROR_BUFFER_OVERFLOW: errno.ENAMETOOLONG,
41 winerror.ERROR_BUSY: errno.EBUSY,
41 winerror.ERROR_BUSY: errno.EBUSY,
42 winerror.ERROR_BUSY_DRIVE: errno.EBUSY,
42 winerror.ERROR_BUSY_DRIVE: errno.EBUSY,
43 winerror.ERROR_CALL_NOT_IMPLEMENTED: errno.ENOSYS,
43 winerror.ERROR_CALL_NOT_IMPLEMENTED: errno.ENOSYS,
44 winerror.ERROR_CANNOT_MAKE: errno.EACCES,
44 winerror.ERROR_CANNOT_MAKE: errno.EACCES,
45 winerror.ERROR_CANTOPEN: errno.EIO,
45 winerror.ERROR_CANTOPEN: errno.EIO,
46 winerror.ERROR_CANTREAD: errno.EIO,
46 winerror.ERROR_CANTREAD: errno.EIO,
47 winerror.ERROR_CANTWRITE: errno.EIO,
47 winerror.ERROR_CANTWRITE: errno.EIO,
48 winerror.ERROR_CRC: errno.EIO,
48 winerror.ERROR_CRC: errno.EIO,
49 winerror.ERROR_CURRENT_DIRECTORY: errno.EACCES,
49 winerror.ERROR_CURRENT_DIRECTORY: errno.EACCES,
50 winerror.ERROR_DEVICE_IN_USE: errno.EBUSY,
50 winerror.ERROR_DEVICE_IN_USE: errno.EBUSY,
51 winerror.ERROR_DEV_NOT_EXIST: errno.ENODEV,
51 winerror.ERROR_DEV_NOT_EXIST: errno.ENODEV,
52 winerror.ERROR_DIRECTORY: errno.EINVAL,
52 winerror.ERROR_DIRECTORY: errno.EINVAL,
53 winerror.ERROR_DIR_NOT_EMPTY: errno.ENOTEMPTY,
53 winerror.ERROR_DIR_NOT_EMPTY: errno.ENOTEMPTY,
54 winerror.ERROR_DISK_CHANGE: errno.EIO,
54 winerror.ERROR_DISK_CHANGE: errno.EIO,
55 winerror.ERROR_DISK_FULL: errno.ENOSPC,
55 winerror.ERROR_DISK_FULL: errno.ENOSPC,
56 winerror.ERROR_DRIVE_LOCKED: errno.EBUSY,
56 winerror.ERROR_DRIVE_LOCKED: errno.EBUSY,
57 winerror.ERROR_ENVVAR_NOT_FOUND: errno.EINVAL,
57 winerror.ERROR_ENVVAR_NOT_FOUND: errno.EINVAL,
58 winerror.ERROR_EXE_MARKED_INVALID: errno.ENOEXEC,
58 winerror.ERROR_EXE_MARKED_INVALID: errno.ENOEXEC,
59 winerror.ERROR_FILENAME_EXCED_RANGE: errno.ENAMETOOLONG,
59 winerror.ERROR_FILENAME_EXCED_RANGE: errno.ENAMETOOLONG,
60 winerror.ERROR_FILE_EXISTS: errno.EEXIST,
60 winerror.ERROR_FILE_EXISTS: errno.EEXIST,
61 winerror.ERROR_FILE_INVALID: errno.ENODEV,
61 winerror.ERROR_FILE_INVALID: errno.ENODEV,
62 winerror.ERROR_FILE_NOT_FOUND: errno.ENOENT,
62 winerror.ERROR_FILE_NOT_FOUND: errno.ENOENT,
63 winerror.ERROR_GEN_FAILURE: errno.EIO,
63 winerror.ERROR_GEN_FAILURE: errno.EIO,
64 winerror.ERROR_HANDLE_DISK_FULL: errno.ENOSPC,
64 winerror.ERROR_HANDLE_DISK_FULL: errno.ENOSPC,
65 winerror.ERROR_INSUFFICIENT_BUFFER: errno.ENOMEM,
65 winerror.ERROR_INSUFFICIENT_BUFFER: errno.ENOMEM,
66 winerror.ERROR_INVALID_ACCESS: errno.EACCES,
66 winerror.ERROR_INVALID_ACCESS: errno.EACCES,
67 winerror.ERROR_INVALID_ADDRESS: errno.EFAULT,
67 winerror.ERROR_INVALID_ADDRESS: errno.EFAULT,
68 winerror.ERROR_INVALID_BLOCK: errno.EFAULT,
68 winerror.ERROR_INVALID_BLOCK: errno.EFAULT,
69 winerror.ERROR_INVALID_DATA: errno.EINVAL,
69 winerror.ERROR_INVALID_DATA: errno.EINVAL,
70 winerror.ERROR_INVALID_DRIVE: errno.ENODEV,
70 winerror.ERROR_INVALID_DRIVE: errno.ENODEV,
71 winerror.ERROR_INVALID_EXE_SIGNATURE: errno.ENOEXEC,
71 winerror.ERROR_INVALID_EXE_SIGNATURE: errno.ENOEXEC,
72 winerror.ERROR_INVALID_FLAGS: errno.EINVAL,
72 winerror.ERROR_INVALID_FLAGS: errno.EINVAL,
73 winerror.ERROR_INVALID_FUNCTION: errno.ENOSYS,
73 winerror.ERROR_INVALID_FUNCTION: errno.ENOSYS,
74 winerror.ERROR_INVALID_HANDLE: errno.EBADF,
74 winerror.ERROR_INVALID_HANDLE: errno.EBADF,
75 winerror.ERROR_INVALID_LOGON_HOURS: errno.EACCES,
75 winerror.ERROR_INVALID_LOGON_HOURS: errno.EACCES,
76 winerror.ERROR_INVALID_NAME: errno.EINVAL,
76 winerror.ERROR_INVALID_NAME: errno.EINVAL,
77 winerror.ERROR_INVALID_OWNER: errno.EINVAL,
77 winerror.ERROR_INVALID_OWNER: errno.EINVAL,
78 winerror.ERROR_INVALID_PARAMETER: errno.EINVAL,
78 winerror.ERROR_INVALID_PARAMETER: errno.EINVAL,
79 winerror.ERROR_INVALID_PASSWORD: errno.EPERM,
79 winerror.ERROR_INVALID_PASSWORD: errno.EPERM,
80 winerror.ERROR_INVALID_PRIMARY_GROUP: errno.EINVAL,
80 winerror.ERROR_INVALID_PRIMARY_GROUP: errno.EINVAL,
81 winerror.ERROR_INVALID_SIGNAL_NUMBER: errno.EINVAL,
81 winerror.ERROR_INVALID_SIGNAL_NUMBER: errno.EINVAL,
82 winerror.ERROR_INVALID_TARGET_HANDLE: errno.EIO,
82 winerror.ERROR_INVALID_TARGET_HANDLE: errno.EIO,
83 winerror.ERROR_INVALID_WORKSTATION: errno.EACCES,
83 winerror.ERROR_INVALID_WORKSTATION: errno.EACCES,
84 winerror.ERROR_IO_DEVICE: errno.EIO,
84 winerror.ERROR_IO_DEVICE: errno.EIO,
85 winerror.ERROR_IO_INCOMPLETE: errno.EINTR,
85 winerror.ERROR_IO_INCOMPLETE: errno.EINTR,
86 winerror.ERROR_LOCKED: errno.EBUSY,
86 winerror.ERROR_LOCKED: errno.EBUSY,
87 winerror.ERROR_LOCK_VIOLATION: errno.EACCES,
87 winerror.ERROR_LOCK_VIOLATION: errno.EACCES,
88 winerror.ERROR_LOGON_FAILURE: errno.EACCES,
88 winerror.ERROR_LOGON_FAILURE: errno.EACCES,
89 winerror.ERROR_MAPPED_ALIGNMENT: errno.EINVAL,
89 winerror.ERROR_MAPPED_ALIGNMENT: errno.EINVAL,
90 winerror.ERROR_META_EXPANSION_TOO_LONG: errno.E2BIG,
90 winerror.ERROR_META_EXPANSION_TOO_LONG: errno.E2BIG,
91 winerror.ERROR_MORE_DATA: errno.EPIPE,
91 winerror.ERROR_MORE_DATA: errno.EPIPE,
92 winerror.ERROR_NEGATIVE_SEEK: errno.ESPIPE,
92 winerror.ERROR_NEGATIVE_SEEK: errno.ESPIPE,
93 winerror.ERROR_NOACCESS: errno.EFAULT,
93 winerror.ERROR_NOACCESS: errno.EFAULT,
94 winerror.ERROR_NONE_MAPPED: errno.EINVAL,
94 winerror.ERROR_NONE_MAPPED: errno.EINVAL,
95 winerror.ERROR_NOT_ENOUGH_MEMORY: errno.ENOMEM,
95 winerror.ERROR_NOT_ENOUGH_MEMORY: errno.ENOMEM,
96 winerror.ERROR_NOT_READY: errno.EAGAIN,
96 winerror.ERROR_NOT_READY: errno.EAGAIN,
97 winerror.ERROR_NOT_SAME_DEVICE: errno.EXDEV,
97 winerror.ERROR_NOT_SAME_DEVICE: errno.EXDEV,
98 winerror.ERROR_NO_DATA: errno.EPIPE,
98 winerror.ERROR_NO_DATA: errno.EPIPE,
99 winerror.ERROR_NO_MORE_SEARCH_HANDLES: errno.EIO,
99 winerror.ERROR_NO_MORE_SEARCH_HANDLES: errno.EIO,
100 winerror.ERROR_NO_PROC_SLOTS: errno.EAGAIN,
100 winerror.ERROR_NO_PROC_SLOTS: errno.EAGAIN,
101 winerror.ERROR_NO_SUCH_PRIVILEGE: errno.EACCES,
101 winerror.ERROR_NO_SUCH_PRIVILEGE: errno.EACCES,
102 winerror.ERROR_OPEN_FAILED: errno.EIO,
102 winerror.ERROR_OPEN_FAILED: errno.EIO,
103 winerror.ERROR_OPEN_FILES: errno.EBUSY,
103 winerror.ERROR_OPEN_FILES: errno.EBUSY,
104 winerror.ERROR_OPERATION_ABORTED: errno.EINTR,
104 winerror.ERROR_OPERATION_ABORTED: errno.EINTR,
105 winerror.ERROR_OUTOFMEMORY: errno.ENOMEM,
105 winerror.ERROR_OUTOFMEMORY: errno.ENOMEM,
106 winerror.ERROR_PASSWORD_EXPIRED: errno.EACCES,
106 winerror.ERROR_PASSWORD_EXPIRED: errno.EACCES,
107 winerror.ERROR_PATH_BUSY: errno.EBUSY,
107 winerror.ERROR_PATH_BUSY: errno.EBUSY,
108 winerror.ERROR_PATH_NOT_FOUND: errno.ENOENT,
108 winerror.ERROR_PATH_NOT_FOUND: errno.ENOENT,
109 winerror.ERROR_PIPE_BUSY: errno.EBUSY,
109 winerror.ERROR_PIPE_BUSY: errno.EBUSY,
110 winerror.ERROR_PIPE_CONNECTED: errno.EPIPE,
110 winerror.ERROR_PIPE_CONNECTED: errno.EPIPE,
111 winerror.ERROR_PIPE_LISTENING: errno.EPIPE,
111 winerror.ERROR_PIPE_LISTENING: errno.EPIPE,
112 winerror.ERROR_PIPE_NOT_CONNECTED: errno.EPIPE,
112 winerror.ERROR_PIPE_NOT_CONNECTED: errno.EPIPE,
113 winerror.ERROR_PRIVILEGE_NOT_HELD: errno.EACCES,
113 winerror.ERROR_PRIVILEGE_NOT_HELD: errno.EACCES,
114 winerror.ERROR_READ_FAULT: errno.EIO,
114 winerror.ERROR_READ_FAULT: errno.EIO,
115 winerror.ERROR_SEEK: errno.EIO,
115 winerror.ERROR_SEEK: errno.EIO,
116 winerror.ERROR_SEEK_ON_DEVICE: errno.ESPIPE,
116 winerror.ERROR_SEEK_ON_DEVICE: errno.ESPIPE,
117 winerror.ERROR_SHARING_BUFFER_EXCEEDED: errno.ENFILE,
117 winerror.ERROR_SHARING_BUFFER_EXCEEDED: errno.ENFILE,
118 winerror.ERROR_SHARING_VIOLATION: errno.EACCES,
118 winerror.ERROR_SHARING_VIOLATION: errno.EACCES,
119 winerror.ERROR_STACK_OVERFLOW: errno.ENOMEM,
119 winerror.ERROR_STACK_OVERFLOW: errno.ENOMEM,
120 winerror.ERROR_SWAPERROR: errno.ENOENT,
120 winerror.ERROR_SWAPERROR: errno.ENOENT,
121 winerror.ERROR_TOO_MANY_MODULES: errno.EMFILE,
121 winerror.ERROR_TOO_MANY_MODULES: errno.EMFILE,
122 winerror.ERROR_TOO_MANY_OPEN_FILES: errno.EMFILE,
122 winerror.ERROR_TOO_MANY_OPEN_FILES: errno.EMFILE,
123 winerror.ERROR_UNRECOGNIZED_MEDIA: errno.ENXIO,
123 winerror.ERROR_UNRECOGNIZED_MEDIA: errno.ENXIO,
124 winerror.ERROR_UNRECOGNIZED_VOLUME: errno.ENODEV,
124 winerror.ERROR_UNRECOGNIZED_VOLUME: errno.ENODEV,
125 winerror.ERROR_WAIT_NO_CHILDREN: errno.ECHILD,
125 winerror.ERROR_WAIT_NO_CHILDREN: errno.ECHILD,
126 winerror.ERROR_WRITE_FAULT: errno.EIO,
126 winerror.ERROR_WRITE_FAULT: errno.EIO,
127 winerror.ERROR_WRITE_PROTECT: errno.EROFS,
127 winerror.ERROR_WRITE_PROTECT: errno.EROFS,
128 }
128 }
129
129
130 def __init__(self, err):
130 def __init__(self, err):
131 self.win_errno, self.win_function, self.win_strerror = err
131 self.win_errno, self.win_function, self.win_strerror = err
132 if self.win_strerror.endswith('.'):
132 if self.win_strerror.endswith('.'):
133 self.win_strerror = self.win_strerror[:-1]
133 self.win_strerror = self.win_strerror[:-1]
134
134
135 class WinIOError(WinError, IOError):
135 class WinIOError(WinError, IOError):
136 def __init__(self, err, filename=None):
136 def __init__(self, err, filename=None):
137 WinError.__init__(self, err)
137 WinError.__init__(self, err)
138 IOError.__init__(self, self.winerror_map.get(self.win_errno, 0),
138 IOError.__init__(self, self.winerror_map.get(self.win_errno, 0),
139 self.win_strerror)
139 self.win_strerror)
140 self.filename = filename
140 self.filename = filename
141
141
142 class WinOSError(WinError, OSError):
142 class WinOSError(WinError, OSError):
143 def __init__(self, err):
143 def __init__(self, err):
144 WinError.__init__(self, err)
144 WinError.__init__(self, err)
145 OSError.__init__(self, self.winerror_map.get(self.win_errno, 0),
145 OSError.__init__(self, self.winerror_map.get(self.win_errno, 0),
146 self.win_strerror)
146 self.win_strerror)
147
147
148 def os_link(src, dst):
148 def os_link(src, dst):
149 # NB will only succeed on NTFS
149 # NB will only succeed on NTFS
150 try:
150 try:
151 win32file.CreateHardLink(dst, src)
151 win32file.CreateHardLink(dst, src)
152 except pywintypes.error, details:
152 except pywintypes.error, details:
153 raise WinOSError(details)
153 raise WinOSError(details)
154
154
def nlinks(pathname):
    """Return number of hardlinks for the given file.

    Uses the win32 file information API when possible, falling back to
    os.lstat if the file cannot be opened.
    """
    try:
        fh = win32file.CreateFile(pathname,
            win32file.GENERIC_READ, win32file.FILE_SHARE_READ,
            None, win32file.OPEN_EXISTING, 0, None)
        try:
            res = win32file.GetFileInformationByHandle(fh)
        finally:
            # close the handle even if GetFileInformationByHandle fails,
            # so it is not leaked
            fh.Close()
        # index 7 of BY_HANDLE_FILE_INFORMATION is nNumberOfLinks
        return res[7]
    except pywintypes.error:
        # lstat rather than stat: report the link count of a symlink
        # itself, not that of its target
        return os.lstat(pathname).st_nlink
166
166
167 def testpid(pid):
167 def testpid(pid):
168 '''return True if pid is still running or unable to
168 '''return True if pid is still running or unable to
169 determine, False otherwise'''
169 determine, False otherwise'''
170 try:
170 try:
171 handle = win32api.OpenProcess(
171 handle = win32api.OpenProcess(
172 win32con.PROCESS_QUERY_INFORMATION, False, pid)
172 win32con.PROCESS_QUERY_INFORMATION, False, pid)
173 if handle:
173 if handle:
174 status = win32process.GetExitCodeProcess(handle)
174 status = win32process.GetExitCodeProcess(handle)
175 return status == win32con.STILL_ACTIVE
175 return status == win32con.STILL_ACTIVE
176 except pywintypes.error, details:
176 except pywintypes.error, details:
177 return details[0] != winerror.ERROR_INVALID_PARAMETER
177 return details[0] != winerror.ERROR_INVALID_PARAMETER
178 return True
178 return True
179
179
def system_rcpath_win32():
    '''return default os-specific hgrc search path'''
    proc = win32api.GetCurrentProcess()
    try:
        # This will fail on windows < NT
        filename = win32process.GetModuleFileNameEx(proc, 0)
    except Exception:
        # was a bare "except:"; narrowed so KeyboardInterrupt and
        # SystemExit still propagate while any API failure falls back
        filename = win32api.GetModuleFileName(0)
    return [os.path.join(os.path.dirname(filename), 'mercurial.ini')]
189
189
def user_rcpath():
    '''return os-specific hgrc search path to the user dir'''
    home = os.path.expanduser('~')
    if home == '~':
        # expanduser could not resolve a home directory: we are on
        # win < nt.  Fetch the APPDATA directory location and use its
        # parent directory as the user home dir.
        appdir = shell.SHGetPathFromIDList(
            shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA))
        home = os.path.dirname(appdir)
    return os.path.join(home, 'mercurial.ini')
200
200
201 class posixfile_nt(object):
201 class posixfile_nt(object):
202 '''file object with posix-like semantics. on windows, normal
202 '''file object with posix-like semantics. on windows, normal
203 files can not be deleted or renamed if they are open. must open
203 files can not be deleted or renamed if they are open. must open
204 with win32file.FILE_SHARE_DELETE. this flag does not exist on
204 with win32file.FILE_SHARE_DELETE. this flag does not exist on
205 windows < nt, so do not use this class there.'''
205 windows < nt, so do not use this class there.'''
206
206
207 # tried to use win32file._open_osfhandle to pass fd to os.fdopen,
207 # tried to use win32file._open_osfhandle to pass fd to os.fdopen,
208 # but does not work at all. wrap win32 file api instead.
208 # but does not work at all. wrap win32 file api instead.
209
209
210 def __init__(self, name, mode='rb'):
210 def __init__(self, name, mode='rb'):
211 access = 0
211 access = 0
212 if 'r' in mode or '+' in mode:
212 if 'r' in mode or '+' in mode:
213 access |= win32file.GENERIC_READ
213 access |= win32file.GENERIC_READ
214 if 'w' in mode or 'a' in mode:
214 if 'w' in mode or 'a' in mode:
215 access |= win32file.GENERIC_WRITE
215 access |= win32file.GENERIC_WRITE
216 if 'r' in mode:
216 if 'r' in mode:
217 creation = win32file.OPEN_EXISTING
217 creation = win32file.OPEN_EXISTING
218 elif 'a' in mode:
218 elif 'a' in mode:
219 creation = win32file.OPEN_ALWAYS
219 creation = win32file.OPEN_ALWAYS
220 else:
220 else:
221 creation = win32file.CREATE_ALWAYS
221 creation = win32file.CREATE_ALWAYS
222 try:
222 try:
223 self.handle = win32file.CreateFile(name,
223 self.handle = win32file.CreateFile(name,
224 access,
224 access,
225 win32file.FILE_SHARE_READ |
225 win32file.FILE_SHARE_READ |
226 win32file.FILE_SHARE_WRITE |
226 win32file.FILE_SHARE_WRITE |
227 win32file.FILE_SHARE_DELETE,
227 win32file.FILE_SHARE_DELETE,
228 None,
228 None,
229 creation,
229 creation,
230 win32file.FILE_ATTRIBUTE_NORMAL,
230 win32file.FILE_ATTRIBUTE_NORMAL,
231 0)
231 0)
232 except pywintypes.error, err:
232 except pywintypes.error, err:
233 raise WinIOError(err, name)
233 raise WinIOError(err, name)
234 self.closed = False
234 self.closed = False
235 self.name = name
235 self.name = name
236 self.mode = mode
236 self.mode = mode
237
237
238 def __iter__(self):
238 def __iter__(self):
239 for line in self.read().splitlines(True):
239 for line in self.read().splitlines(True):
240 yield line
240 yield line
241
241
242 def read(self, count=-1):
242 def read(self, count=-1):
243 try:
243 try:
244 cs = cStringIO.StringIO()
244 cs = cStringIO.StringIO()
245 while count:
245 while count:
246 wincount = int(count)
246 wincount = int(count)
247 if wincount == -1:
247 if wincount == -1:
248 wincount = 1048576
248 wincount = 1048576
249 val, data = win32file.ReadFile(self.handle, wincount)
249 val, data = win32file.ReadFile(self.handle, wincount)
250 if not data: break
250 if not data: break
251 cs.write(data)
251 cs.write(data)
252 if count != -1:
252 if count != -1:
253 count -= len(data)
253 count -= len(data)
254 return cs.getvalue()
254 return cs.getvalue()
255 except pywintypes.error, err:
255 except pywintypes.error, err:
256 raise WinIOError(err)
256 raise WinIOError(err)
257
257
258 def write(self, data):
258 def write(self, data):
259 try:
259 try:
260 if 'a' in self.mode:
260 if 'a' in self.mode:
261 win32file.SetFilePointer(self.handle, 0, win32file.FILE_END)
261 win32file.SetFilePointer(self.handle, 0, win32file.FILE_END)
262 nwrit = 0
262 nwrit = 0
263 while nwrit < len(data):
263 while nwrit < len(data):
264 val, nwrit = win32file.WriteFile(self.handle, data)
264 val, nwrit = win32file.WriteFile(self.handle, data)
265 data = data[nwrit:]
265 data = data[nwrit:]
266 except pywintypes.error, err:
266 except pywintypes.error, err:
267 raise WinIOError(err)
267 raise WinIOError(err)
268
268
269 def seek(self, pos, whence=0):
269 def seek(self, pos, whence=0):
270 try:
270 try:
271 win32file.SetFilePointer(self.handle, int(pos), whence)
271 win32file.SetFilePointer(self.handle, int(pos), whence)
272 except pywintypes.error, err:
272 except pywintypes.error, err:
273 raise WinIOError(err)
273 raise WinIOError(err)
274
274
275 def tell(self):
275 def tell(self):
276 try:
276 try:
277 return win32file.SetFilePointer(self.handle, 0,
277 return win32file.SetFilePointer(self.handle, 0,
278 win32file.FILE_CURRENT)
278 win32file.FILE_CURRENT)
279 except pywintypes.error, err:
279 except pywintypes.error, err:
280 raise WinIOError(err)
280 raise WinIOError(err)
281
281
282 def close(self):
282 def close(self):
283 if not self.closed:
283 if not self.closed:
284 self.handle = None
284 self.handle = None
285 self.closed = True
285 self.closed = True
286
286
287 def flush(self):
287 def flush(self):
288 try:
288 try:
289 win32file.FlushFileBuffers(self.handle)
289 win32file.FlushFileBuffers(self.handle)
290 except pywintypes.error, err:
290 except pywintypes.error, err:
291 raise WinIOError(err)
291 raise WinIOError(err)
292
292
293 def truncate(self, pos=0):
293 def truncate(self, pos=0):
294 try:
294 try:
295 win32file.SetFilePointer(self.handle, int(pos),
295 win32file.SetFilePointer(self.handle, int(pos),
296 win32file.FILE_BEGIN)
296 win32file.FILE_BEGIN)
297 win32file.SetEndOfFile(self.handle)
297 win32file.SetEndOfFile(self.handle)
298 except pywintypes.error, err:
298 except pywintypes.error, err:
299 raise WinIOError(err)
299 raise WinIOError(err)
General Comments 0
You need to be logged in to leave comments. Login now