##// END OF EJS Templates
improve walk docstrings
Matt Mackall -
r3532:26b556c1 default
parent child Browse files
Show More
@@ -1,529 +1,531 b''
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "struct os time bisect stat strutil util re errno")
13 demandload(globals(), "struct os time bisect stat strutil util re errno")
14
14
15 class dirstate(object):
15 class dirstate(object):
16 format = ">cllll"
16 format = ">cllll"
17
17
18 def __init__(self, opener, ui, root):
18 def __init__(self, opener, ui, root):
19 self.opener = opener
19 self.opener = opener
20 self.root = root
20 self.root = root
21 self.dirty = 0
21 self.dirty = 0
22 self.ui = ui
22 self.ui = ui
23 self.map = None
23 self.map = None
24 self.pl = None
24 self.pl = None
25 self.dirs = None
25 self.dirs = None
26 self.copymap = {}
26 self.copymap = {}
27 self.ignorefunc = None
27 self.ignorefunc = None
28 self.blockignore = False
28 self.blockignore = False
29
29
30 def wjoin(self, f):
30 def wjoin(self, f):
31 return os.path.join(self.root, f)
31 return os.path.join(self.root, f)
32
32
33 def getcwd(self):
33 def getcwd(self):
34 cwd = os.getcwd()
34 cwd = os.getcwd()
35 if cwd == self.root: return ''
35 if cwd == self.root: return ''
36 return cwd[len(self.root) + 1:]
36 return cwd[len(self.root) + 1:]
37
37
38 def hgignore(self):
38 def hgignore(self):
39 '''return the contents of .hgignore files as a list of patterns.
39 '''return the contents of .hgignore files as a list of patterns.
40
40
41 the files parsed for patterns include:
41 the files parsed for patterns include:
42 .hgignore in the repository root
42 .hgignore in the repository root
43 any additional files specified in the [ui] section of ~/.hgrc
43 any additional files specified in the [ui] section of ~/.hgrc
44
44
45 trailing white space is dropped.
45 trailing white space is dropped.
46 the escape character is backslash.
46 the escape character is backslash.
47 comments start with #.
47 comments start with #.
48 empty lines are skipped.
48 empty lines are skipped.
49
49
50 lines can be of the following formats:
50 lines can be of the following formats:
51
51
52 syntax: regexp # defaults following lines to non-rooted regexps
52 syntax: regexp # defaults following lines to non-rooted regexps
53 syntax: glob # defaults following lines to non-rooted globs
53 syntax: glob # defaults following lines to non-rooted globs
54 re:pattern # non-rooted regular expression
54 re:pattern # non-rooted regular expression
55 glob:pattern # non-rooted glob
55 glob:pattern # non-rooted glob
56 pattern # pattern of the current default type'''
56 pattern # pattern of the current default type'''
57 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
57 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
58 def parselines(fp):
58 def parselines(fp):
59 for line in fp:
59 for line in fp:
60 escape = False
60 escape = False
61 for i in xrange(len(line)):
61 for i in xrange(len(line)):
62 if escape: escape = False
62 if escape: escape = False
63 elif line[i] == '\\': escape = True
63 elif line[i] == '\\': escape = True
64 elif line[i] == '#': break
64 elif line[i] == '#': break
65 line = line[:i].rstrip()
65 line = line[:i].rstrip()
66 if line: yield line
66 if line: yield line
67 repoignore = self.wjoin('.hgignore')
67 repoignore = self.wjoin('.hgignore')
68 files = [repoignore]
68 files = [repoignore]
69 files.extend(self.ui.hgignorefiles())
69 files.extend(self.ui.hgignorefiles())
70 pats = {}
70 pats = {}
71 for f in files:
71 for f in files:
72 try:
72 try:
73 pats[f] = []
73 pats[f] = []
74 fp = open(f)
74 fp = open(f)
75 syntax = 'relre:'
75 syntax = 'relre:'
76 for line in parselines(fp):
76 for line in parselines(fp):
77 if line.startswith('syntax:'):
77 if line.startswith('syntax:'):
78 s = line[7:].strip()
78 s = line[7:].strip()
79 try:
79 try:
80 syntax = syntaxes[s]
80 syntax = syntaxes[s]
81 except KeyError:
81 except KeyError:
82 self.ui.warn(_("%s: ignoring invalid "
82 self.ui.warn(_("%s: ignoring invalid "
83 "syntax '%s'\n") % (f, s))
83 "syntax '%s'\n") % (f, s))
84 continue
84 continue
85 pat = syntax + line
85 pat = syntax + line
86 for s in syntaxes.values():
86 for s in syntaxes.values():
87 if line.startswith(s):
87 if line.startswith(s):
88 pat = line
88 pat = line
89 break
89 break
90 pats[f].append(pat)
90 pats[f].append(pat)
91 except IOError, inst:
91 except IOError, inst:
92 if f != repoignore:
92 if f != repoignore:
93 self.ui.warn(_("skipping unreadable ignore file"
93 self.ui.warn(_("skipping unreadable ignore file"
94 " '%s': %s\n") % (f, inst.strerror))
94 " '%s': %s\n") % (f, inst.strerror))
95 return pats
95 return pats
96
96
97 def ignore(self, fn):
97 def ignore(self, fn):
98 '''default match function used by dirstate and
98 '''default match function used by dirstate and
99 localrepository. this honours the repository .hgignore file
99 localrepository. this honours the repository .hgignore file
100 and any other files specified in the [ui] section of .hgrc.'''
100 and any other files specified in the [ui] section of .hgrc.'''
101 if self.blockignore:
101 if self.blockignore:
102 return False
102 return False
103 if not self.ignorefunc:
103 if not self.ignorefunc:
104 ignore = self.hgignore()
104 ignore = self.hgignore()
105 allpats = []
105 allpats = []
106 [allpats.extend(patlist) for patlist in ignore.values()]
106 [allpats.extend(patlist) for patlist in ignore.values()]
107 if allpats:
107 if allpats:
108 try:
108 try:
109 files, self.ignorefunc, anypats = (
109 files, self.ignorefunc, anypats = (
110 util.matcher(self.root, inc=allpats, src='.hgignore'))
110 util.matcher(self.root, inc=allpats, src='.hgignore'))
111 except util.Abort:
111 except util.Abort:
112 # Re-raise an exception where the src is the right file
112 # Re-raise an exception where the src is the right file
113 for f, patlist in ignore.items():
113 for f, patlist in ignore.items():
114 files, self.ignorefunc, anypats = (
114 files, self.ignorefunc, anypats = (
115 util.matcher(self.root, inc=patlist, src=f))
115 util.matcher(self.root, inc=patlist, src=f))
116 else:
116 else:
117 self.ignorefunc = util.never
117 self.ignorefunc = util.never
118 return self.ignorefunc(fn)
118 return self.ignorefunc(fn)
119
119
120 def __del__(self):
120 def __del__(self):
121 if self.dirty:
121 if self.dirty:
122 self.write()
122 self.write()
123
123
124 def __getitem__(self, key):
124 def __getitem__(self, key):
125 try:
125 try:
126 return self.map[key]
126 return self.map[key]
127 except TypeError:
127 except TypeError:
128 self.lazyread()
128 self.lazyread()
129 return self[key]
129 return self[key]
130
130
131 def __contains__(self, key):
131 def __contains__(self, key):
132 self.lazyread()
132 self.lazyread()
133 return key in self.map
133 return key in self.map
134
134
135 def parents(self):
135 def parents(self):
136 self.lazyread()
136 self.lazyread()
137 return self.pl
137 return self.pl
138
138
139 def markdirty(self):
139 def markdirty(self):
140 if not self.dirty:
140 if not self.dirty:
141 self.dirty = 1
141 self.dirty = 1
142
142
143 def setparents(self, p1, p2=nullid):
143 def setparents(self, p1, p2=nullid):
144 self.lazyread()
144 self.lazyread()
145 self.markdirty()
145 self.markdirty()
146 self.pl = p1, p2
146 self.pl = p1, p2
147
147
148 def state(self, key):
148 def state(self, key):
149 try:
149 try:
150 return self[key][0]
150 return self[key][0]
151 except KeyError:
151 except KeyError:
152 return "?"
152 return "?"
153
153
154 def lazyread(self):
154 def lazyread(self):
155 if self.map is None:
155 if self.map is None:
156 self.read()
156 self.read()
157
157
158 def parse(self, st):
158 def parse(self, st):
159 self.pl = [st[:20], st[20: 40]]
159 self.pl = [st[:20], st[20: 40]]
160
160
161 # deref fields so they will be local in loop
161 # deref fields so they will be local in loop
162 map = self.map
162 map = self.map
163 copymap = self.copymap
163 copymap = self.copymap
164 format = self.format
164 format = self.format
165 unpack = struct.unpack
165 unpack = struct.unpack
166
166
167 pos = 40
167 pos = 40
168 e_size = struct.calcsize(format)
168 e_size = struct.calcsize(format)
169
169
170 while pos < len(st):
170 while pos < len(st):
171 newpos = pos + e_size
171 newpos = pos + e_size
172 e = unpack(format, st[pos:newpos])
172 e = unpack(format, st[pos:newpos])
173 l = e[4]
173 l = e[4]
174 pos = newpos
174 pos = newpos
175 newpos = pos + l
175 newpos = pos + l
176 f = st[pos:newpos]
176 f = st[pos:newpos]
177 if '\0' in f:
177 if '\0' in f:
178 f, c = f.split('\0')
178 f, c = f.split('\0')
179 copymap[f] = c
179 copymap[f] = c
180 map[f] = e[:4]
180 map[f] = e[:4]
181 pos = newpos
181 pos = newpos
182
182
183 def read(self):
183 def read(self):
184 self.map = {}
184 self.map = {}
185 self.pl = [nullid, nullid]
185 self.pl = [nullid, nullid]
186 try:
186 try:
187 st = self.opener("dirstate").read()
187 st = self.opener("dirstate").read()
188 if st:
188 if st:
189 self.parse(st)
189 self.parse(st)
190 except IOError, err:
190 except IOError, err:
191 if err.errno != errno.ENOENT: raise
191 if err.errno != errno.ENOENT: raise
192
192
193 def copy(self, source, dest):
193 def copy(self, source, dest):
194 self.lazyread()
194 self.lazyread()
195 self.markdirty()
195 self.markdirty()
196 self.copymap[dest] = source
196 self.copymap[dest] = source
197
197
198 def copied(self, file):
198 def copied(self, file):
199 return self.copymap.get(file, None)
199 return self.copymap.get(file, None)
200
200
201 def copies(self):
201 def copies(self):
202 return self.copymap
202 return self.copymap
203
203
204 def initdirs(self):
204 def initdirs(self):
205 if self.dirs is None:
205 if self.dirs is None:
206 self.dirs = {}
206 self.dirs = {}
207 for f in self.map:
207 for f in self.map:
208 self.updatedirs(f, 1)
208 self.updatedirs(f, 1)
209
209
210 def updatedirs(self, path, delta):
210 def updatedirs(self, path, delta):
211 if self.dirs is not None:
211 if self.dirs is not None:
212 for c in strutil.findall(path, '/'):
212 for c in strutil.findall(path, '/'):
213 pc = path[:c]
213 pc = path[:c]
214 self.dirs.setdefault(pc, 0)
214 self.dirs.setdefault(pc, 0)
215 self.dirs[pc] += delta
215 self.dirs[pc] += delta
216
216
217 def checkshadows(self, files):
217 def checkshadows(self, files):
218 def prefixes(f):
218 def prefixes(f):
219 for c in strutil.rfindall(f, '/'):
219 for c in strutil.rfindall(f, '/'):
220 yield f[:c]
220 yield f[:c]
221 self.lazyread()
221 self.lazyread()
222 self.initdirs()
222 self.initdirs()
223 seendirs = {}
223 seendirs = {}
224 for f in files:
224 for f in files:
225 if self.dirs.get(f):
225 if self.dirs.get(f):
226 raise util.Abort(_('directory named %r already in dirstate') %
226 raise util.Abort(_('directory named %r already in dirstate') %
227 f)
227 f)
228 for d in prefixes(f):
228 for d in prefixes(f):
229 if d in seendirs:
229 if d in seendirs:
230 break
230 break
231 if d in self.map:
231 if d in self.map:
232 raise util.Abort(_('file named %r already in dirstate') %
232 raise util.Abort(_('file named %r already in dirstate') %
233 d)
233 d)
234 seendirs[d] = True
234 seendirs[d] = True
235
235
236 def update(self, files, state, **kw):
236 def update(self, files, state, **kw):
237 ''' current states:
237 ''' current states:
238 n normal
238 n normal
239 m needs merging
239 m needs merging
240 r marked for removal
240 r marked for removal
241 a marked for addition'''
241 a marked for addition'''
242
242
243 if not files: return
243 if not files: return
244 self.lazyread()
244 self.lazyread()
245 self.markdirty()
245 self.markdirty()
246 if state == "a":
246 if state == "a":
247 self.initdirs()
247 self.initdirs()
248 self.checkshadows(files)
248 self.checkshadows(files)
249 for f in files:
249 for f in files:
250 if state == "r":
250 if state == "r":
251 self.map[f] = ('r', 0, 0, 0)
251 self.map[f] = ('r', 0, 0, 0)
252 self.updatedirs(f, -1)
252 self.updatedirs(f, -1)
253 else:
253 else:
254 if state == "a":
254 if state == "a":
255 self.updatedirs(f, 1)
255 self.updatedirs(f, 1)
256 s = os.lstat(self.wjoin(f))
256 s = os.lstat(self.wjoin(f))
257 st_size = kw.get('st_size', s.st_size)
257 st_size = kw.get('st_size', s.st_size)
258 st_mtime = kw.get('st_mtime', s.st_mtime)
258 st_mtime = kw.get('st_mtime', s.st_mtime)
259 self.map[f] = (state, s.st_mode, st_size, st_mtime)
259 self.map[f] = (state, s.st_mode, st_size, st_mtime)
260 if self.copymap.has_key(f):
260 if self.copymap.has_key(f):
261 del self.copymap[f]
261 del self.copymap[f]
262
262
263 def forget(self, files):
263 def forget(self, files):
264 if not files: return
264 if not files: return
265 self.lazyread()
265 self.lazyread()
266 self.markdirty()
266 self.markdirty()
267 self.initdirs()
267 self.initdirs()
268 for f in files:
268 for f in files:
269 try:
269 try:
270 del self.map[f]
270 del self.map[f]
271 self.updatedirs(f, -1)
271 self.updatedirs(f, -1)
272 except KeyError:
272 except KeyError:
273 self.ui.warn(_("not in dirstate: %s!\n") % f)
273 self.ui.warn(_("not in dirstate: %s!\n") % f)
274 pass
274 pass
275
275
276 def clear(self):
276 def clear(self):
277 self.map = {}
277 self.map = {}
278 self.copymap = {}
278 self.copymap = {}
279 self.dirs = None
279 self.dirs = None
280 self.markdirty()
280 self.markdirty()
281
281
282 def rebuild(self, parent, files):
282 def rebuild(self, parent, files):
283 self.clear()
283 self.clear()
284 umask = os.umask(0)
284 umask = os.umask(0)
285 os.umask(umask)
285 os.umask(umask)
286 for f in files:
286 for f in files:
287 if files.execf(f):
287 if files.execf(f):
288 self.map[f] = ('n', ~umask, -1, 0)
288 self.map[f] = ('n', ~umask, -1, 0)
289 else:
289 else:
290 self.map[f] = ('n', ~umask & 0666, -1, 0)
290 self.map[f] = ('n', ~umask & 0666, -1, 0)
291 self.pl = (parent, nullid)
291 self.pl = (parent, nullid)
292 self.markdirty()
292 self.markdirty()
293
293
294 def write(self):
294 def write(self):
295 if not self.dirty:
295 if not self.dirty:
296 return
296 return
297 st = self.opener("dirstate", "w", atomic=True)
297 st = self.opener("dirstate", "w", atomic=True)
298 st.write("".join(self.pl))
298 st.write("".join(self.pl))
299 for f, e in self.map.items():
299 for f, e in self.map.items():
300 c = self.copied(f)
300 c = self.copied(f)
301 if c:
301 if c:
302 f = f + "\0" + c
302 f = f + "\0" + c
303 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
303 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
304 st.write(e + f)
304 st.write(e + f)
305 self.dirty = 0
305 self.dirty = 0
306
306
307 def filterfiles(self, files):
307 def filterfiles(self, files):
308 ret = {}
308 ret = {}
309 unknown = []
309 unknown = []
310
310
311 for x in files:
311 for x in files:
312 if x == '.':
312 if x == '.':
313 return self.map.copy()
313 return self.map.copy()
314 if x not in self.map:
314 if x not in self.map:
315 unknown.append(x)
315 unknown.append(x)
316 else:
316 else:
317 ret[x] = self.map[x]
317 ret[x] = self.map[x]
318
318
319 if not unknown:
319 if not unknown:
320 return ret
320 return ret
321
321
322 b = self.map.keys()
322 b = self.map.keys()
323 b.sort()
323 b.sort()
324 blen = len(b)
324 blen = len(b)
325
325
326 for x in unknown:
326 for x in unknown:
327 bs = bisect.bisect(b, "%s%s" % (x, '/'))
327 bs = bisect.bisect(b, "%s%s" % (x, '/'))
328 while bs < blen:
328 while bs < blen:
329 s = b[bs]
329 s = b[bs]
330 if len(s) > len(x) and s.startswith(x):
330 if len(s) > len(x) and s.startswith(x):
331 ret[s] = self.map[s]
331 ret[s] = self.map[s]
332 else:
332 else:
333 break
333 break
334 bs += 1
334 bs += 1
335 return ret
335 return ret
336
336
337 def supported_type(self, f, st, verbose=False):
337 def supported_type(self, f, st, verbose=False):
338 if stat.S_ISREG(st.st_mode):
338 if stat.S_ISREG(st.st_mode):
339 return True
339 return True
340 if verbose:
340 if verbose:
341 kind = 'unknown'
341 kind = 'unknown'
342 if stat.S_ISCHR(st.st_mode): kind = _('character device')
342 if stat.S_ISCHR(st.st_mode): kind = _('character device')
343 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
343 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
344 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
344 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
345 elif stat.S_ISLNK(st.st_mode): kind = _('symbolic link')
345 elif stat.S_ISLNK(st.st_mode): kind = _('symbolic link')
346 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
346 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
347 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
347 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
348 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
348 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
349 util.pathto(self.getcwd(), f),
349 util.pathto(self.getcwd(), f),
350 kind))
350 kind))
351 return False
351 return False
352
352
353 def walk(self, files=None, match=util.always, badmatch=None):
353 def walk(self, files=None, match=util.always, badmatch=None):
354 # filter out the stat
354 # filter out the stat
355 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
355 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
356 yield src, f
356 yield src, f
357
357
358 def statwalk(self, files=None, match=util.always, ignored=False,
358 def statwalk(self, files=None, match=util.always, ignored=False,
359 badmatch=None):
359 badmatch=None):
360 '''
360 '''
361 walk recursively through the directory tree, finding all files
361 walk recursively through the directory tree, finding all files
362 matched by the match function
362 matched by the match function
363
363
364 results are yielded in a tuple (src, filename, st), where src
364 results are yielded in a tuple (src, filename, st), where src
365 is one of:
365 is one of:
366 'f' the file was found in the directory tree
366 'f' the file was found in the directory tree
367 'm' the file was only in the dirstate and not in the tree
367 'm' the file was only in the dirstate and not in the tree
368 'b' file was not found and matched badmatch
369
368 and st is the stat result if the file was found in the directory.
370 and st is the stat result if the file was found in the directory.
369 '''
371 '''
370 self.lazyread()
372 self.lazyread()
371
373
372 # walk all files by default
374 # walk all files by default
373 if not files:
375 if not files:
374 files = [self.root]
376 files = [self.root]
375 dc = self.map.copy()
377 dc = self.map.copy()
376 else:
378 else:
377 dc = self.filterfiles(files)
379 dc = self.filterfiles(files)
378
380
379 def imatch(file_):
381 def imatch(file_):
380 file_ = util.pconvert(file_)
382 file_ = util.pconvert(file_)
381 if not ignored and file_ not in dc and self.ignore(file_):
383 if not ignored and file_ not in dc and self.ignore(file_):
382 return False
384 return False
383 return match(file_)
385 return match(file_)
384
386
385 # self.root may end with a path separator when self.root == '/'
387 # self.root may end with a path separator when self.root == '/'
386 common_prefix_len = len(self.root)
388 common_prefix_len = len(self.root)
387 if not self.root.endswith('/'):
389 if not self.root.endswith('/'):
388 common_prefix_len += 1
390 common_prefix_len += 1
389 # recursion free walker, faster than os.walk.
391 # recursion free walker, faster than os.walk.
390 def findfiles(s):
392 def findfiles(s):
391 work = [s]
393 work = [s]
392 while work:
394 while work:
393 top = work.pop()
395 top = work.pop()
394 names = os.listdir(top)
396 names = os.listdir(top)
395 names.sort()
397 names.sort()
396 # nd is the top of the repository dir tree
398 # nd is the top of the repository dir tree
397 nd = util.normpath(top[common_prefix_len:])
399 nd = util.normpath(top[common_prefix_len:])
398 if nd == '.':
400 if nd == '.':
399 nd = ''
401 nd = ''
400 else:
402 else:
401 # do not recurse into a repo contained in this
403 # do not recurse into a repo contained in this
402 # one. use bisect to find .hg directory so speed
404 # one. use bisect to find .hg directory so speed
403 # is good on big directory.
405 # is good on big directory.
404 hg = bisect.bisect_left(names, '.hg')
406 hg = bisect.bisect_left(names, '.hg')
405 if hg < len(names) and names[hg] == '.hg':
407 if hg < len(names) and names[hg] == '.hg':
406 if os.path.isdir(os.path.join(top, '.hg')):
408 if os.path.isdir(os.path.join(top, '.hg')):
407 continue
409 continue
408 for f in names:
410 for f in names:
409 np = util.pconvert(os.path.join(nd, f))
411 np = util.pconvert(os.path.join(nd, f))
410 if seen(np):
412 if seen(np):
411 continue
413 continue
412 p = os.path.join(top, f)
414 p = os.path.join(top, f)
413 # don't trip over symlinks
415 # don't trip over symlinks
414 st = os.lstat(p)
416 st = os.lstat(p)
415 if stat.S_ISDIR(st.st_mode):
417 if stat.S_ISDIR(st.st_mode):
416 ds = os.path.join(nd, f +'/')
418 ds = os.path.join(nd, f +'/')
417 if imatch(ds):
419 if imatch(ds):
418 work.append(p)
420 work.append(p)
419 if imatch(np) and np in dc:
421 if imatch(np) and np in dc:
420 yield 'm', np, st
422 yield 'm', np, st
421 elif imatch(np):
423 elif imatch(np):
422 if self.supported_type(np, st):
424 if self.supported_type(np, st):
423 yield 'f', np, st
425 yield 'f', np, st
424 elif np in dc:
426 elif np in dc:
425 yield 'm', np, st
427 yield 'm', np, st
426
428
427 known = {'.hg': 1}
429 known = {'.hg': 1}
428 def seen(fn):
430 def seen(fn):
429 if fn in known: return True
431 if fn in known: return True
430 known[fn] = 1
432 known[fn] = 1
431
433
432 # step one, find all files that match our criteria
434 # step one, find all files that match our criteria
433 files.sort()
435 files.sort()
434 for ff in util.unique(files):
436 for ff in util.unique(files):
435 f = self.wjoin(ff)
437 f = self.wjoin(ff)
436 try:
438 try:
437 st = os.lstat(f)
439 st = os.lstat(f)
438 except OSError, inst:
440 except OSError, inst:
439 nf = util.normpath(ff)
441 nf = util.normpath(ff)
440 found = False
442 found = False
441 for fn in dc:
443 for fn in dc:
442 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
444 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
443 found = True
445 found = True
444 break
446 break
445 if not found:
447 if not found:
446 if inst.errno != errno.ENOENT or not badmatch:
448 if inst.errno != errno.ENOENT or not badmatch:
447 self.ui.warn('%s: %s\n' % (
449 self.ui.warn('%s: %s\n' % (
448 util.pathto(self.getcwd(), ff),
450 util.pathto(self.getcwd(), ff),
449 inst.strerror))
451 inst.strerror))
450 elif badmatch and badmatch(ff) and imatch(ff):
452 elif badmatch and badmatch(ff) and imatch(ff):
451 yield 'b', ff, None
453 yield 'b', ff, None
452 continue
454 continue
453 if stat.S_ISDIR(st.st_mode):
455 if stat.S_ISDIR(st.st_mode):
454 cmp1 = (lambda x, y: cmp(x[1], y[1]))
456 cmp1 = (lambda x, y: cmp(x[1], y[1]))
455 sorted_ = [ x for x in findfiles(f) ]
457 sorted_ = [ x for x in findfiles(f) ]
456 sorted_.sort(cmp1)
458 sorted_.sort(cmp1)
457 for e in sorted_:
459 for e in sorted_:
458 yield e
460 yield e
459 else:
461 else:
460 ff = util.normpath(ff)
462 ff = util.normpath(ff)
461 if seen(ff):
463 if seen(ff):
462 continue
464 continue
463 self.blockignore = True
465 self.blockignore = True
464 if imatch(ff):
466 if imatch(ff):
465 if self.supported_type(ff, st, verbose=True):
467 if self.supported_type(ff, st, verbose=True):
466 yield 'f', ff, st
468 yield 'f', ff, st
467 elif ff in dc:
469 elif ff in dc:
468 yield 'm', ff, st
470 yield 'm', ff, st
469 self.blockignore = False
471 self.blockignore = False
470
472
471 # step two run through anything left in the dc hash and yield
473 # step two run through anything left in the dc hash and yield
472 # if we haven't already seen it
474 # if we haven't already seen it
473 ks = dc.keys()
475 ks = dc.keys()
474 ks.sort()
476 ks.sort()
475 for k in ks:
477 for k in ks:
476 if not seen(k) and imatch(k):
478 if not seen(k) and imatch(k):
477 yield 'm', k, None
479 yield 'm', k, None
478
480
479 def status(self, files=None, match=util.always, list_ignored=False,
481 def status(self, files=None, match=util.always, list_ignored=False,
480 list_clean=False):
482 list_clean=False):
481 lookup, modified, added, unknown, ignored = [], [], [], [], []
483 lookup, modified, added, unknown, ignored = [], [], [], [], []
482 removed, deleted, clean = [], [], []
484 removed, deleted, clean = [], [], []
483
485
484 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
486 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
485 try:
487 try:
486 type_, mode, size, time = self[fn]
488 type_, mode, size, time = self[fn]
487 except KeyError:
489 except KeyError:
488 if list_ignored and self.ignore(fn):
490 if list_ignored and self.ignore(fn):
489 ignored.append(fn)
491 ignored.append(fn)
490 else:
492 else:
491 unknown.append(fn)
493 unknown.append(fn)
492 continue
494 continue
493 if src == 'm':
495 if src == 'm':
494 nonexistent = True
496 nonexistent = True
495 if not st:
497 if not st:
496 try:
498 try:
497 st = os.lstat(self.wjoin(fn))
499 st = os.lstat(self.wjoin(fn))
498 except OSError, inst:
500 except OSError, inst:
499 if inst.errno != errno.ENOENT:
501 if inst.errno != errno.ENOENT:
500 raise
502 raise
501 st = None
503 st = None
502 # We need to re-check that it is a valid file
504 # We need to re-check that it is a valid file
503 if st and self.supported_type(fn, st):
505 if st and self.supported_type(fn, st):
504 nonexistent = False
506 nonexistent = False
505 # XXX: what to do with file no longer present in the fs
507 # XXX: what to do with file no longer present in the fs
506 # who are not removed in the dirstate ?
508 # who are not removed in the dirstate ?
507 if nonexistent and type_ in "nm":
509 if nonexistent and type_ in "nm":
508 deleted.append(fn)
510 deleted.append(fn)
509 continue
511 continue
510 # check the common case first
512 # check the common case first
511 if type_ == 'n':
513 if type_ == 'n':
512 if not st:
514 if not st:
513 st = os.lstat(self.wjoin(fn))
515 st = os.lstat(self.wjoin(fn))
514 if size >= 0 and (size != st.st_size
516 if size >= 0 and (size != st.st_size
515 or (mode ^ st.st_mode) & 0100):
517 or (mode ^ st.st_mode) & 0100):
516 modified.append(fn)
518 modified.append(fn)
517 elif time != int(st.st_mtime):
519 elif time != int(st.st_mtime):
518 lookup.append(fn)
520 lookup.append(fn)
519 elif list_clean:
521 elif list_clean:
520 clean.append(fn)
522 clean.append(fn)
521 elif type_ == 'm':
523 elif type_ == 'm':
522 modified.append(fn)
524 modified.append(fn)
523 elif type_ == 'a':
525 elif type_ == 'a':
524 added.append(fn)
526 added.append(fn)
525 elif type_ == 'r':
527 elif type_ == 'r':
526 removed.append(fn)
528 removed.append(fn)
527
529
528 return (lookup, modified, added, removed, deleted, unknown, ignored,
530 return (lookup, modified, added, removed, deleted, unknown, ignored,
529 clean)
531 clean)
@@ -1,1841 +1,1853 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import *
8 from node import *
9 from i18n import gettext as _
9 from i18n import gettext as _
10 from demandload import *
10 from demandload import *
11 import repo
11 import repo
12 demandload(globals(), "appendfile changegroup")
12 demandload(globals(), "appendfile changegroup")
13 demandload(globals(), "changelog dirstate filelog manifest context")
13 demandload(globals(), "changelog dirstate filelog manifest context")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 demandload(globals(), "os revlog time util")
15 demandload(globals(), "os revlog time util")
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = ('lookup', 'changegroupsubset')
18 capabilities = ('lookup', 'changegroupsubset')
19
19
    def __del__(self):
        # Break the repository <-> transaction reference on teardown so
        # a lingering transaction handle does not keep objects alive.
        self.transhandle = None
22 def __init__(self, parentui, path=None, create=0):
22 def __init__(self, parentui, path=None, create=0):
23 repo.repository.__init__(self)
23 repo.repository.__init__(self)
24 if not path:
24 if not path:
25 p = os.getcwd()
25 p = os.getcwd()
26 while not os.path.isdir(os.path.join(p, ".hg")):
26 while not os.path.isdir(os.path.join(p, ".hg")):
27 oldp = p
27 oldp = p
28 p = os.path.dirname(p)
28 p = os.path.dirname(p)
29 if p == oldp:
29 if p == oldp:
30 raise repo.RepoError(_("There is no Mercurial repository"
30 raise repo.RepoError(_("There is no Mercurial repository"
31 " here (.hg not found)"))
31 " here (.hg not found)"))
32 path = p
32 path = p
33 self.path = os.path.join(path, ".hg")
33 self.path = os.path.join(path, ".hg")
34
34
35 if not os.path.isdir(self.path):
35 if not os.path.isdir(self.path):
36 if create:
36 if create:
37 if not os.path.exists(path):
37 if not os.path.exists(path):
38 os.mkdir(path)
38 os.mkdir(path)
39 os.mkdir(self.path)
39 os.mkdir(self.path)
40 os.mkdir(self.join("data"))
40 os.mkdir(self.join("data"))
41 else:
41 else:
42 raise repo.RepoError(_("repository %s not found") % path)
42 raise repo.RepoError(_("repository %s not found") % path)
43 elif create:
43 elif create:
44 raise repo.RepoError(_("repository %s already exists") % path)
44 raise repo.RepoError(_("repository %s already exists") % path)
45
45
46 self.root = os.path.abspath(path)
46 self.root = os.path.abspath(path)
47 self.origroot = path
47 self.origroot = path
48 self.ui = ui.ui(parentui=parentui)
48 self.ui = ui.ui(parentui=parentui)
49 self.opener = util.opener(self.path)
49 self.opener = util.opener(self.path)
50 self.sopener = util.opener(self.path)
50 self.sopener = util.opener(self.path)
51 self.wopener = util.opener(self.root)
51 self.wopener = util.opener(self.root)
52
52
53 try:
53 try:
54 self.ui.readconfig(self.join("hgrc"), self.root)
54 self.ui.readconfig(self.join("hgrc"), self.root)
55 except IOError:
55 except IOError:
56 pass
56 pass
57
57
58 v = self.ui.configrevlog()
58 v = self.ui.configrevlog()
59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
61 fl = v.get('flags', None)
61 fl = v.get('flags', None)
62 flags = 0
62 flags = 0
63 if fl != None:
63 if fl != None:
64 for x in fl.split():
64 for x in fl.split():
65 flags |= revlog.flagstr(x)
65 flags |= revlog.flagstr(x)
66 elif self.revlogv1:
66 elif self.revlogv1:
67 flags = revlog.REVLOG_DEFAULT_FLAGS
67 flags = revlog.REVLOG_DEFAULT_FLAGS
68
68
69 v = self.revlogversion | flags
69 v = self.revlogversion | flags
70 self.manifest = manifest.manifest(self.sopener, v)
70 self.manifest = manifest.manifest(self.sopener, v)
71 self.changelog = changelog.changelog(self.sopener, v)
71 self.changelog = changelog.changelog(self.sopener, v)
72
72
73 # the changelog might not have the inline index flag
73 # the changelog might not have the inline index flag
74 # on. If the format of the changelog is the same as found in
74 # on. If the format of the changelog is the same as found in
75 # .hgrc, apply any flags found in the .hgrc as well.
75 # .hgrc, apply any flags found in the .hgrc as well.
76 # Otherwise, just version from the changelog
76 # Otherwise, just version from the changelog
77 v = self.changelog.version
77 v = self.changelog.version
78 if v == self.revlogversion:
78 if v == self.revlogversion:
79 v |= flags
79 v |= flags
80 self.revlogversion = v
80 self.revlogversion = v
81
81
82 self.tagscache = None
82 self.tagscache = None
83 self.branchcache = None
83 self.branchcache = None
84 self.nodetagscache = None
84 self.nodetagscache = None
85 self.encodepats = None
85 self.encodepats = None
86 self.decodepats = None
86 self.decodepats = None
87 self.transhandle = None
87 self.transhandle = None
88
88
89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
90
90
91 def url(self):
91 def url(self):
92 return 'file:' + self.root
92 return 'file:' + self.root
93
93
    def hook(self, name, throw=False, **args):
        """Run every configured hook whose name matches `name`.

        Hook values starting with "python:" are called in-process via
        callhook(); anything else is run as a shell command via
        runhook().  Extra keyword args are passed to python hooks and
        exported to shell hooks as HG_* environment variables.  If
        throw is true, a failing hook raises util.Abort; otherwise a
        warning is printed.  Returns true if any hook failed.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            # funcname must be "module.attr[.attr...]"
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk down the dotted attribute path to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # run an external (shell) hook; args become HG_* env vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # select "name" and "name.suffix" entries, run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
174
174
    # characters that may not appear in a tag name (checked by tag())
    tag_disallowed = ':\r\n'
176
176
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        # give pretag hooks a chance to veto the tag
        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags live in the non-versioned 'localtags' file
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to commit .hgtags if the user has pending edits to it,
        # so we never fold unrelated changes into the tag commit
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
        if self.dirstate.state('.hgtags') == '?':
            # first tag in this repo: start tracking .hgtags
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
217
217
218 def tags(self):
218 def tags(self):
219 '''return a mapping of tag to node'''
219 '''return a mapping of tag to node'''
220 if not self.tagscache:
220 if not self.tagscache:
221 self.tagscache = {}
221 self.tagscache = {}
222
222
223 def parsetag(line, context):
223 def parsetag(line, context):
224 if not line:
224 if not line:
225 return
225 return
226 s = l.split(" ", 1)
226 s = l.split(" ", 1)
227 if len(s) != 2:
227 if len(s) != 2:
228 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 self.ui.warn(_("%s: cannot parse entry\n") % context)
229 return
229 return
230 node, key = s
230 node, key = s
231 key = key.strip()
231 key = key.strip()
232 try:
232 try:
233 bin_n = bin(node)
233 bin_n = bin(node)
234 except TypeError:
234 except TypeError:
235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 self.ui.warn(_("%s: node '%s' is not well formed\n") %
236 (context, node))
236 (context, node))
237 return
237 return
238 if bin_n not in self.changelog.nodemap:
238 if bin_n not in self.changelog.nodemap:
239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
240 (context, key))
240 (context, key))
241 return
241 return
242 self.tagscache[key] = bin_n
242 self.tagscache[key] = bin_n
243
243
244 # read the tags file from each head, ending with the tip,
244 # read the tags file from each head, ending with the tip,
245 # and add each tag found to the map, with "newer" ones
245 # and add each tag found to the map, with "newer" ones
246 # taking precedence
246 # taking precedence
247 heads = self.heads()
247 heads = self.heads()
248 heads.reverse()
248 heads.reverse()
249 seen = {}
249 seen = {}
250 for node in heads:
250 for node in heads:
251 f = self.filectx('.hgtags', node)
251 f = self.filectx('.hgtags', node)
252 if not f or f.filerev() in seen: continue
252 if not f or f.filerev() in seen: continue
253 seen[f.filerev()] = 1
253 seen[f.filerev()] = 1
254 count = 0
254 count = 0
255 for l in f.data().splitlines():
255 for l in f.data().splitlines():
256 count += 1
256 count += 1
257 parsetag(l, _("%s, line %d") % (str(f), count))
257 parsetag(l, _("%s, line %d") % (str(f), count))
258
258
259 try:
259 try:
260 f = self.opener("localtags")
260 f = self.opener("localtags")
261 count = 0
261 count = 0
262 for l in f:
262 for l in f:
263 count += 1
263 count += 1
264 parsetag(l, _("localtags, line %d") % count)
264 parsetag(l, _("localtags, line %d") % count)
265 except IOError:
265 except IOError:
266 pass
266 pass
267
267
268 self.tagscache['tip'] = self.changelog.tip()
268 self.tagscache['tip'] = self.changelog.tip()
269
269
270 return self.tagscache
270 return self.tagscache
271
271
272 def tagslist(self):
272 def tagslist(self):
273 '''return a list of tags ordered by revision'''
273 '''return a list of tags ordered by revision'''
274 l = []
274 l = []
275 for t, n in self.tags().items():
275 for t, n in self.tags().items():
276 try:
276 try:
277 r = self.changelog.rev(n)
277 r = self.changelog.rev(n)
278 except:
278 except:
279 r = -2 # sort to the beginning of the list if unknown
279 r = -2 # sort to the beginning of the list if unknown
280 l.append((r, t, n))
280 l.append((r, t, n))
281 l.sort()
281 l.sort()
282 return [(t, n) for r, t, n in l]
282 return [(t, n) for r, t, n in l]
283
283
284 def nodetags(self, node):
284 def nodetags(self, node):
285 '''return the tags associated with a node'''
285 '''return the tags associated with a node'''
286 if not self.nodetagscache:
286 if not self.nodetagscache:
287 self.nodetagscache = {}
287 self.nodetagscache = {}
288 for t, n in self.tags().items():
288 for t, n in self.tags().items():
289 self.nodetagscache.setdefault(n, []).append(t)
289 self.nodetagscache.setdefault(n, []).append(t)
290 return self.nodetagscache.get(node, [])
290 return self.nodetagscache.get(node, [])
291
291
292 def branchtags(self):
292 def branchtags(self):
293 if self.branchcache != None:
293 if self.branchcache != None:
294 return self.branchcache
294 return self.branchcache
295
295
296 self.branchcache = {} # avoid recursion in changectx
296 self.branchcache = {} # avoid recursion in changectx
297
297
298 partial, last, lrev = self._readbranchcache()
298 partial, last, lrev = self._readbranchcache()
299
299
300 tiprev = self.changelog.count() - 1
300 tiprev = self.changelog.count() - 1
301 if lrev != tiprev:
301 if lrev != tiprev:
302 self._updatebranchcache(partial, lrev+1, tiprev+1)
302 self._updatebranchcache(partial, lrev+1, tiprev+1)
303 self._writebranchcache(partial, self.changelog.tip(), tiprev)
303 self._writebranchcache(partial, self.changelog.tip(), tiprev)
304
304
305 self.branchcache = partial
305 self.branchcache = partial
306 return self.branchcache
306 return self.branchcache
307
307
    def _readbranchcache(self):
        """Read .hg/branches.cache.

        Returns (partial, last, lrev): partial maps branch name to
        node, and last/lrev are the changelog node/rev the cache was
        written against (nullid/-1 when the file is missing or stale).
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            # first line: "<hex tip node> <tip rev>" the cache was built at
            last, lrev = f.readline().rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if (lrev < self.changelog.count() and
                self.changelog.node(lrev) == last): # sanity check
                # remaining lines: "<hex node> <branch label>"
                for l in f:
                    node, label = l.rstrip().split(" ", 1)
                    partial[label] = bin(node)
            else: # invalidate the cache
                last, lrev = nullid, -1
            f.close()
        except IOError:
            last, lrev = nullid, -1
        return partial, last, lrev
325
325
    def _writebranchcache(self, branches, tip, tiprev):
        """Write the branch map to .hg/branches.cache.

        Write errors are deliberately ignored: the cache is a pure
        optimization and will simply be rebuilt next time.
        """
        try:
            f = self.opener("branches.cache", "w")
            # header line records which tip this cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
        except IOError:
            pass
334
334
335 def _updatebranchcache(self, partial, start, end):
335 def _updatebranchcache(self, partial, start, end):
336 for r in xrange(start, end):
336 for r in xrange(start, end):
337 c = self.changectx(r)
337 c = self.changectx(r)
338 b = c.branch()
338 b = c.branch()
339 if b:
339 if b:
340 partial[b] = c.node()
340 partial[b] = c.node()
341
341
342 def lookup(self, key):
342 def lookup(self, key):
343 if key == '.':
343 if key == '.':
344 key = self.dirstate.parents()[0]
344 key = self.dirstate.parents()[0]
345 if key == nullid:
345 if key == nullid:
346 raise repo.RepoError(_("no revision checked out"))
346 raise repo.RepoError(_("no revision checked out"))
347 n = self.changelog._match(key)
347 n = self.changelog._match(key)
348 if n:
348 if n:
349 return n
349 return n
350 if key in self.tags():
350 if key in self.tags():
351 return self.tags()[key]
351 return self.tags()[key]
352 if key in self.branchtags():
352 if key in self.branchtags():
353 return self.branchtags()[key]
353 return self.branchtags()[key]
354 n = self.changelog._partialmatch(key)
354 n = self.changelog._partialmatch(key)
355 if n:
355 if n:
356 return n
356 return n
357 raise repo.RepoError(_("unknown revision '%s'") % key)
357 raise repo.RepoError(_("unknown revision '%s'") % key)
358
358
359 def dev(self):
359 def dev(self):
360 return os.lstat(self.path).st_dev
360 return os.lstat(self.path).st_dev
361
361
362 def local(self):
362 def local(self):
363 return True
363 return True
364
364
365 def join(self, f):
365 def join(self, f):
366 return os.path.join(self.path, f)
366 return os.path.join(self.path, f)
367
367
368 def sjoin(self, f):
368 def sjoin(self, f):
369 return os.path.join(self.path, f)
369 return os.path.join(self.path, f)
370
370
371 def wjoin(self, f):
371 def wjoin(self, f):
372 return os.path.join(self.root, f)
372 return os.path.join(self.root, f)
373
373
    def file(self, f):
        """Return the filelog for tracked path f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)
378
378
379 def changectx(self, changeid=None):
379 def changectx(self, changeid=None):
380 return context.changectx(self, changeid)
380 return context.changectx(self, changeid)
381
381
382 def workingctx(self):
382 def workingctx(self):
383 return context.workingctx(self)
383 return context.workingctx(self)
384
384
385 def parents(self, changeid=None):
385 def parents(self, changeid=None):
386 '''
386 '''
387 get list of changectxs for parents of changeid or working directory
387 get list of changectxs for parents of changeid or working directory
388 '''
388 '''
389 if changeid is None:
389 if changeid is None:
390 pl = self.dirstate.parents()
390 pl = self.dirstate.parents()
391 else:
391 else:
392 n = self.changelog.lookup(changeid)
392 n = self.changelog.lookup(changeid)
393 pl = self.changelog.parents(n)
393 pl = self.changelog.parents(n)
394 if pl[1] == nullid:
394 if pl[1] == nullid:
395 return [self.changectx(pl[0])]
395 return [self.changectx(pl[0])]
396 return [self.changectx(pl[0]), self.changectx(pl[1])]
396 return [self.changectx(pl[0]), self.changectx(pl[1])]
397
397
    def filectx(self, path, changeid=None, fileid=None):
        """Return a filectx for path.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
402
402
403 def getcwd(self):
403 def getcwd(self):
404 return self.dirstate.getcwd()
404 return self.dirstate.getcwd()
405
405
406 def wfile(self, f, mode='r'):
406 def wfile(self, f, mode='r'):
407 return self.wopener(f, mode)
407 return self.wopener(f, mode)
408
408
    def wread(self, filename):
        """Read filename from the working directory, passing it through
        the first matching [encode] filter, if any."""
        if self.encodepats == None:
            # compile the [encode] pattern matchers once, lazily
            l = []
            for pat, cmd in self.ui.configitems("encode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.encodepats = l

        data = self.wopener(filename, 'r').read()

        # only the first matching filter is applied
        for mf, cmd in self.encodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        return data
426
426
    def wwrite(self, filename, data, fd=None):
        """Write data to filename in the working directory, passing it
        through the first matching [decode] filter, if any.  When fd is
        given, write to that already-open file object instead."""
        if self.decodepats == None:
            # compile the [decode] pattern matchers once, lazily
            l = []
            for pat, cmd in self.ui.configitems("decode"):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.decodepats = l

        # only the first matching filter is applied
        for mf, cmd in self.decodepats:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                break

        if fd:
            return fd.write(data)
        return self.wopener(filename, 'w').write(data)
444
444
    def transaction(self):
        """Return a transaction over the store.

        If a transaction is already running, return a nested handle on
        it.  Otherwise snapshot the dirstate to journal.dirstate (so
        rollback can restore the working directory state too) and open
        a fresh journal.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(self.path))
        self.transhandle = tr
        return tr
462
462
    def recover(self):
        """Roll back an interrupted transaction, if one is present.

        Returns True when a journal was found and rolled back, False
        (with a warning) otherwise.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # the store changed behind our caches: reload them
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
473
473
    def rollback(self, wlock=None):
        """Undo the last committed transaction.

        Restores the store from the undo journal and the dirstate from
        undo.dirstate, then reloads cached state.  A working-directory
        lock is taken unless the caller already holds one.
        """
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
486
486
487 def wreload(self):
487 def wreload(self):
488 self.dirstate.read()
488 self.dirstate.read()
489
489
490 def reload(self):
490 def reload(self):
491 self.changelog.load()
491 self.changelog.load()
492 self.manifest.load()
492 self.manifest.load()
493 self.tagscache = None
493 self.tagscache = None
494 self.nodetagscache = None
494 self.nodetagscache = None
495
495
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file at lockname.

        First try without waiting; if it is held and wait is false,
        propagate lock.LockHeld.  Otherwise warn who holds it and retry
        with the configured ui.timeout (default 600 seconds).  Runs
        acquirefn after the lock is obtained; releasefn is handed to
        the lock object for release time.  Returns the lock.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %s\n") %
                         (desc, inst.args[0]))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
511
511
512 def lock(self, wait=1):
512 def lock(self, wait=1):
513 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
513 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
514 desc=_('repository %s') % self.origroot)
514 desc=_('repository %s') % self.origroot)
515
515
516 def wlock(self, wait=1):
516 def wlock(self, wait=1):
517 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
517 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
518 self.wreload,
518 self.wreload,
519 desc=_('working directory of %s') % self.origroot)
519 desc=_('working directory of %s') % self.origroot)
520
520
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        fn is the working-directory path; manifest1/manifest2 are the
        parents' manifests; linkrev links the new filelog entry to the
        changelog.  Appends fn to changelist when a new revision is
        actually written, and returns the resulting file node (or the
        existing parent node when the file is unmodified).
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # record copy/rename source and the revision it was copied
            # from; the parent pointers are rearranged so that the copy
            # metadata carries the history link
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            else: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
560
560
561 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
561 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
562 orig_parent = self.dirstate.parents()[0] or nullid
562 orig_parent = self.dirstate.parents()[0] or nullid
563 p1 = p1 or self.dirstate.parents()[0] or nullid
563 p1 = p1 or self.dirstate.parents()[0] or nullid
564 p2 = p2 or self.dirstate.parents()[1] or nullid
564 p2 = p2 or self.dirstate.parents()[1] or nullid
565 c1 = self.changelog.read(p1)
565 c1 = self.changelog.read(p1)
566 c2 = self.changelog.read(p2)
566 c2 = self.changelog.read(p2)
567 m1 = self.manifest.read(c1[0]).copy()
567 m1 = self.manifest.read(c1[0]).copy()
568 m2 = self.manifest.read(c2[0])
568 m2 = self.manifest.read(c2[0])
569 changed = []
569 changed = []
570 removed = []
570 removed = []
571
571
572 if orig_parent == p1:
572 if orig_parent == p1:
573 update_dirstate = 1
573 update_dirstate = 1
574 else:
574 else:
575 update_dirstate = 0
575 update_dirstate = 0
576
576
577 if not wlock:
577 if not wlock:
578 wlock = self.wlock()
578 wlock = self.wlock()
579 l = self.lock()
579 l = self.lock()
580 tr = self.transaction()
580 tr = self.transaction()
581 linkrev = self.changelog.count()
581 linkrev = self.changelog.count()
582 for f in files:
582 for f in files:
583 try:
583 try:
584 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
584 m1[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
585 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
585 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
586 except IOError:
586 except IOError:
587 try:
587 try:
588 del m1[f]
588 del m1[f]
589 if update_dirstate:
589 if update_dirstate:
590 self.dirstate.forget([f])
590 self.dirstate.forget([f])
591 removed.append(f)
591 removed.append(f)
592 except:
592 except:
593 # deleted from p2?
593 # deleted from p2?
594 pass
594 pass
595
595
596 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
596 mnode = self.manifest.add(m1, tr, linkrev, c1[0], c2[0])
597 user = user or self.ui.username()
597 user = user or self.ui.username()
598 n = self.changelog.add(mnode, changed + removed, text,
598 n = self.changelog.add(mnode, changed + removed, text,
599 tr, p1, p2, user, date)
599 tr, p1, p2, user, date)
600 tr.close()
600 tr.close()
601 if update_dirstate:
601 if update_dirstate:
602 self.dirstate.setparents(n, nullid)
602 self.dirstate.setparents(n, nullid)
603
603
604 def commit(self, files=None, text="", user=None, date=None,
604 def commit(self, files=None, text="", user=None, date=None,
605 match=util.always, force=False, lock=None, wlock=None,
605 match=util.always, force=False, lock=None, wlock=None,
606 force_editor=False):
606 force_editor=False):
607 commit = []
607 commit = []
608 remove = []
608 remove = []
609 changed = []
609 changed = []
610
610
611 if files:
611 if files:
612 for f in files:
612 for f in files:
613 s = self.dirstate.state(f)
613 s = self.dirstate.state(f)
614 if s in 'nmai':
614 if s in 'nmai':
615 commit.append(f)
615 commit.append(f)
616 elif s == 'r':
616 elif s == 'r':
617 remove.append(f)
617 remove.append(f)
618 else:
618 else:
619 self.ui.warn(_("%s not tracked!\n") % f)
619 self.ui.warn(_("%s not tracked!\n") % f)
620 else:
620 else:
621 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
621 modified, added, removed, deleted, unknown = self.status(match=match)[:5]
622 commit = modified + added
622 commit = modified + added
623 remove = removed
623 remove = removed
624
624
625 p1, p2 = self.dirstate.parents()
625 p1, p2 = self.dirstate.parents()
626 c1 = self.changelog.read(p1)
626 c1 = self.changelog.read(p1)
627 c2 = self.changelog.read(p2)
627 c2 = self.changelog.read(p2)
628 m1 = self.manifest.read(c1[0]).copy()
628 m1 = self.manifest.read(c1[0]).copy()
629 m2 = self.manifest.read(c2[0])
629 m2 = self.manifest.read(c2[0])
630
630
631 branchname = self.workingctx().branch()
631 branchname = self.workingctx().branch()
632 oldname = c1[5].get("branch", "")
632 oldname = c1[5].get("branch", "")
633
633
634 if not commit and not remove and not force and p2 == nullid and \
634 if not commit and not remove and not force and p2 == nullid and \
635 branchname == oldname:
635 branchname == oldname:
636 self.ui.status(_("nothing changed\n"))
636 self.ui.status(_("nothing changed\n"))
637 return None
637 return None
638
638
639 xp1 = hex(p1)
639 xp1 = hex(p1)
640 if p2 == nullid: xp2 = ''
640 if p2 == nullid: xp2 = ''
641 else: xp2 = hex(p2)
641 else: xp2 = hex(p2)
642
642
643 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
643 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
644
644
645 if not wlock:
645 if not wlock:
646 wlock = self.wlock()
646 wlock = self.wlock()
647 if not lock:
647 if not lock:
648 lock = self.lock()
648 lock = self.lock()
649 tr = self.transaction()
649 tr = self.transaction()
650
650
651 # check in files
651 # check in files
652 new = {}
652 new = {}
653 linkrev = self.changelog.count()
653 linkrev = self.changelog.count()
654 commit.sort()
654 commit.sort()
655 for f in commit:
655 for f in commit:
656 self.ui.note(f + "\n")
656 self.ui.note(f + "\n")
657 try:
657 try:
658 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
658 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
659 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
659 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
660 except IOError:
660 except IOError:
661 self.ui.warn(_("trouble committing %s!\n") % f)
661 self.ui.warn(_("trouble committing %s!\n") % f)
662 raise
662 raise
663
663
664 # update manifest
664 # update manifest
665 m1.update(new)
665 m1.update(new)
666 for f in remove:
666 for f in remove:
667 if f in m1:
667 if f in m1:
668 del m1[f]
668 del m1[f]
669 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
669 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
670
670
671 # add changeset
671 # add changeset
672 new = new.keys()
672 new = new.keys()
673 new.sort()
673 new.sort()
674
674
675 user = user or self.ui.username()
675 user = user or self.ui.username()
676 if not text or force_editor:
676 if not text or force_editor:
677 edittext = []
677 edittext = []
678 if text:
678 if text:
679 edittext.append(text)
679 edittext.append(text)
680 edittext.append("")
680 edittext.append("")
681 if p2 != nullid:
681 if p2 != nullid:
682 edittext.append("HG: branch merge")
682 edittext.append("HG: branch merge")
683 edittext.extend(["HG: changed %s" % f for f in changed])
683 edittext.extend(["HG: changed %s" % f for f in changed])
684 edittext.extend(["HG: removed %s" % f for f in remove])
684 edittext.extend(["HG: removed %s" % f for f in remove])
685 if not changed and not remove:
685 if not changed and not remove:
686 edittext.append("HG: no files changed")
686 edittext.append("HG: no files changed")
687 edittext.append("")
687 edittext.append("")
688 # run editor in the repository root
688 # run editor in the repository root
689 olddir = os.getcwd()
689 olddir = os.getcwd()
690 os.chdir(self.root)
690 os.chdir(self.root)
691 text = self.ui.edit("\n".join(edittext), user)
691 text = self.ui.edit("\n".join(edittext), user)
692 os.chdir(olddir)
692 os.chdir(olddir)
693
693
694 lines = [line.rstrip() for line in text.rstrip().splitlines()]
694 lines = [line.rstrip() for line in text.rstrip().splitlines()]
695 while lines and not lines[0]:
695 while lines and not lines[0]:
696 del lines[0]
696 del lines[0]
697 if not lines:
697 if not lines:
698 return None
698 return None
699 text = '\n'.join(lines)
699 text = '\n'.join(lines)
700 extra = {}
700 extra = {}
701 if branchname:
701 if branchname:
702 extra["branch"] = branchname
702 extra["branch"] = branchname
703 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
703 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
704 user, date, extra)
704 user, date, extra)
705 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
705 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
706 parent2=xp2)
706 parent2=xp2)
707 tr.close()
707 tr.close()
708
708
709 self.dirstate.setparents(n)
709 self.dirstate.setparents(n)
710 self.dirstate.update(new, "n")
710 self.dirstate.update(new, "n")
711 self.dirstate.forget(remove)
711 self.dirstate.forget(remove)
712
712
713 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
713 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
714 return n
714 return n
715
715
716 def walk(self, node=None, files=[], match=util.always, badmatch=None):
716 def walk(self, node=None, files=[], match=util.always, badmatch=None):
717 '''
718 walk recursively through the directory tree or a given
719 changeset, finding all files matched by the match
720 function
721
722 results are yielded in a tuple (src, filename), where src
723 is one of:
724 'f' the file was found in the directory tree
725 'm' the file was only in the dirstate and not in the tree
726 'b' file was not found and matched badmatch
727 '''
728
717 if node:
729 if node:
718 fdict = dict.fromkeys(files)
730 fdict = dict.fromkeys(files)
719 for fn in self.manifest.read(self.changelog.read(node)[0]):
731 for fn in self.manifest.read(self.changelog.read(node)[0]):
720 for ffn in fdict:
732 for ffn in fdict:
721 # match if the file is the exact name or a directory
733 # match if the file is the exact name or a directory
722 if ffn == fn or fn.startswith("%s/" % ffn):
734 if ffn == fn or fn.startswith("%s/" % ffn):
723 del fdict[ffn]
735 del fdict[ffn]
724 break
736 break
725 if match(fn):
737 if match(fn):
726 yield 'm', fn
738 yield 'm', fn
727 for fn in fdict:
739 for fn in fdict:
728 if badmatch and badmatch(fn):
740 if badmatch and badmatch(fn):
729 if match(fn):
741 if match(fn):
730 yield 'b', fn
742 yield 'b', fn
731 else:
743 else:
732 self.ui.warn(_('%s: No such file in rev %s\n') % (
744 self.ui.warn(_('%s: No such file in rev %s\n') % (
733 util.pathto(self.getcwd(), fn), short(node)))
745 util.pathto(self.getcwd(), fn), short(node)))
734 else:
746 else:
735 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
747 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
736 yield src, fn
748 yield src, fn
737
749
738 def status(self, node1=None, node2=None, files=[], match=util.always,
750 def status(self, node1=None, node2=None, files=[], match=util.always,
739 wlock=None, list_ignored=False, list_clean=False):
751 wlock=None, list_ignored=False, list_clean=False):
740 """return status of files between two nodes or node and working directory
752 """return status of files between two nodes or node and working directory
741
753
742 If node1 is None, use the first dirstate parent instead.
754 If node1 is None, use the first dirstate parent instead.
743 If node2 is None, compare node1 with working directory.
755 If node2 is None, compare node1 with working directory.
744 """
756 """
745
757
746 def fcmp(fn, mf):
758 def fcmp(fn, mf):
747 t1 = self.wread(fn)
759 t1 = self.wread(fn)
748 return self.file(fn).cmp(mf.get(fn, nullid), t1)
760 return self.file(fn).cmp(mf.get(fn, nullid), t1)
749
761
750 def mfmatches(node):
762 def mfmatches(node):
751 change = self.changelog.read(node)
763 change = self.changelog.read(node)
752 mf = self.manifest.read(change[0]).copy()
764 mf = self.manifest.read(change[0]).copy()
753 for fn in mf.keys():
765 for fn in mf.keys():
754 if not match(fn):
766 if not match(fn):
755 del mf[fn]
767 del mf[fn]
756 return mf
768 return mf
757
769
758 modified, added, removed, deleted, unknown = [], [], [], [], []
770 modified, added, removed, deleted, unknown = [], [], [], [], []
759 ignored, clean = [], []
771 ignored, clean = [], []
760
772
761 compareworking = False
773 compareworking = False
762 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
774 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
763 compareworking = True
775 compareworking = True
764
776
765 if not compareworking:
777 if not compareworking:
766 # read the manifest from node1 before the manifest from node2,
778 # read the manifest from node1 before the manifest from node2,
767 # so that we'll hit the manifest cache if we're going through
779 # so that we'll hit the manifest cache if we're going through
768 # all the revisions in parent->child order.
780 # all the revisions in parent->child order.
769 mf1 = mfmatches(node1)
781 mf1 = mfmatches(node1)
770
782
771 # are we comparing the working directory?
783 # are we comparing the working directory?
772 if not node2:
784 if not node2:
773 if not wlock:
785 if not wlock:
774 try:
786 try:
775 wlock = self.wlock(wait=0)
787 wlock = self.wlock(wait=0)
776 except lock.LockException:
788 except lock.LockException:
777 wlock = None
789 wlock = None
778 (lookup, modified, added, removed, deleted, unknown,
790 (lookup, modified, added, removed, deleted, unknown,
779 ignored, clean) = self.dirstate.status(files, match,
791 ignored, clean) = self.dirstate.status(files, match,
780 list_ignored, list_clean)
792 list_ignored, list_clean)
781
793
782 # are we comparing working dir against its parent?
794 # are we comparing working dir against its parent?
783 if compareworking:
795 if compareworking:
784 if lookup:
796 if lookup:
785 # do a full compare of any files that might have changed
797 # do a full compare of any files that might have changed
786 mf2 = mfmatches(self.dirstate.parents()[0])
798 mf2 = mfmatches(self.dirstate.parents()[0])
787 for f in lookup:
799 for f in lookup:
788 if fcmp(f, mf2):
800 if fcmp(f, mf2):
789 modified.append(f)
801 modified.append(f)
790 else:
802 else:
791 clean.append(f)
803 clean.append(f)
792 if wlock is not None:
804 if wlock is not None:
793 self.dirstate.update([f], "n")
805 self.dirstate.update([f], "n")
794 else:
806 else:
795 # we are comparing working dir against non-parent
807 # we are comparing working dir against non-parent
796 # generate a pseudo-manifest for the working dir
808 # generate a pseudo-manifest for the working dir
797 # XXX: create it in dirstate.py ?
809 # XXX: create it in dirstate.py ?
798 mf2 = mfmatches(self.dirstate.parents()[0])
810 mf2 = mfmatches(self.dirstate.parents()[0])
799 for f in lookup + modified + added:
811 for f in lookup + modified + added:
800 mf2[f] = ""
812 mf2[f] = ""
801 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
813 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
802 for f in removed:
814 for f in removed:
803 if f in mf2:
815 if f in mf2:
804 del mf2[f]
816 del mf2[f]
805 else:
817 else:
806 # we are comparing two revisions
818 # we are comparing two revisions
807 mf2 = mfmatches(node2)
819 mf2 = mfmatches(node2)
808
820
809 if not compareworking:
821 if not compareworking:
810 # flush lists from dirstate before comparing manifests
822 # flush lists from dirstate before comparing manifests
811 modified, added, clean = [], [], []
823 modified, added, clean = [], [], []
812
824
813 # make sure to sort the files so we talk to the disk in a
825 # make sure to sort the files so we talk to the disk in a
814 # reasonable order
826 # reasonable order
815 mf2keys = mf2.keys()
827 mf2keys = mf2.keys()
816 mf2keys.sort()
828 mf2keys.sort()
817 for fn in mf2keys:
829 for fn in mf2keys:
818 if mf1.has_key(fn):
830 if mf1.has_key(fn):
819 if mf1.flags(fn) != mf2.flags(fn) or \
831 if mf1.flags(fn) != mf2.flags(fn) or \
820 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
832 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
821 modified.append(fn)
833 modified.append(fn)
822 elif list_clean:
834 elif list_clean:
823 clean.append(fn)
835 clean.append(fn)
824 del mf1[fn]
836 del mf1[fn]
825 else:
837 else:
826 added.append(fn)
838 added.append(fn)
827
839
828 removed = mf1.keys()
840 removed = mf1.keys()
829
841
830 # sort and return results:
842 # sort and return results:
831 for l in modified, added, removed, deleted, unknown, ignored, clean:
843 for l in modified, added, removed, deleted, unknown, ignored, clean:
832 l.sort()
844 l.sort()
833 return (modified, added, removed, deleted, unknown, ignored, clean)
845 return (modified, added, removed, deleted, unknown, ignored, clean)
834
846
835 def add(self, list, wlock=None):
847 def add(self, list, wlock=None):
836 if not wlock:
848 if not wlock:
837 wlock = self.wlock()
849 wlock = self.wlock()
838 for f in list:
850 for f in list:
839 p = self.wjoin(f)
851 p = self.wjoin(f)
840 if not os.path.exists(p):
852 if not os.path.exists(p):
841 self.ui.warn(_("%s does not exist!\n") % f)
853 self.ui.warn(_("%s does not exist!\n") % f)
842 elif not os.path.isfile(p):
854 elif not os.path.isfile(p):
843 self.ui.warn(_("%s not added: only files supported currently\n")
855 self.ui.warn(_("%s not added: only files supported currently\n")
844 % f)
856 % f)
845 elif self.dirstate.state(f) in 'an':
857 elif self.dirstate.state(f) in 'an':
846 self.ui.warn(_("%s already tracked!\n") % f)
858 self.ui.warn(_("%s already tracked!\n") % f)
847 else:
859 else:
848 self.dirstate.update([f], "a")
860 self.dirstate.update([f], "a")
849
861
850 def forget(self, list, wlock=None):
862 def forget(self, list, wlock=None):
851 if not wlock:
863 if not wlock:
852 wlock = self.wlock()
864 wlock = self.wlock()
853 for f in list:
865 for f in list:
854 if self.dirstate.state(f) not in 'ai':
866 if self.dirstate.state(f) not in 'ai':
855 self.ui.warn(_("%s not added!\n") % f)
867 self.ui.warn(_("%s not added!\n") % f)
856 else:
868 else:
857 self.dirstate.forget([f])
869 self.dirstate.forget([f])
858
870
859 def remove(self, list, unlink=False, wlock=None):
871 def remove(self, list, unlink=False, wlock=None):
860 if unlink:
872 if unlink:
861 for f in list:
873 for f in list:
862 try:
874 try:
863 util.unlink(self.wjoin(f))
875 util.unlink(self.wjoin(f))
864 except OSError, inst:
876 except OSError, inst:
865 if inst.errno != errno.ENOENT:
877 if inst.errno != errno.ENOENT:
866 raise
878 raise
867 if not wlock:
879 if not wlock:
868 wlock = self.wlock()
880 wlock = self.wlock()
869 for f in list:
881 for f in list:
870 p = self.wjoin(f)
882 p = self.wjoin(f)
871 if os.path.exists(p):
883 if os.path.exists(p):
872 self.ui.warn(_("%s still exists!\n") % f)
884 self.ui.warn(_("%s still exists!\n") % f)
873 elif self.dirstate.state(f) == 'a':
885 elif self.dirstate.state(f) == 'a':
874 self.dirstate.forget([f])
886 self.dirstate.forget([f])
875 elif f not in self.dirstate:
887 elif f not in self.dirstate:
876 self.ui.warn(_("%s not tracked!\n") % f)
888 self.ui.warn(_("%s not tracked!\n") % f)
877 else:
889 else:
878 self.dirstate.update([f], "r")
890 self.dirstate.update([f], "r")
879
891
880 def undelete(self, list, wlock=None):
892 def undelete(self, list, wlock=None):
881 p = self.dirstate.parents()[0]
893 p = self.dirstate.parents()[0]
882 mn = self.changelog.read(p)[0]
894 mn = self.changelog.read(p)[0]
883 m = self.manifest.read(mn)
895 m = self.manifest.read(mn)
884 if not wlock:
896 if not wlock:
885 wlock = self.wlock()
897 wlock = self.wlock()
886 for f in list:
898 for f in list:
887 if self.dirstate.state(f) not in "r":
899 if self.dirstate.state(f) not in "r":
888 self.ui.warn("%s not removed!\n" % f)
900 self.ui.warn("%s not removed!\n" % f)
889 else:
901 else:
890 t = self.file(f).read(m[f])
902 t = self.file(f).read(m[f])
891 self.wwrite(f, t)
903 self.wwrite(f, t)
892 util.set_exec(self.wjoin(f), m.execf(f))
904 util.set_exec(self.wjoin(f), m.execf(f))
893 self.dirstate.update([f], "n")
905 self.dirstate.update([f], "n")
894
906
895 def copy(self, source, dest, wlock=None):
907 def copy(self, source, dest, wlock=None):
896 p = self.wjoin(dest)
908 p = self.wjoin(dest)
897 if not os.path.exists(p):
909 if not os.path.exists(p):
898 self.ui.warn(_("%s does not exist!\n") % dest)
910 self.ui.warn(_("%s does not exist!\n") % dest)
899 elif not os.path.isfile(p):
911 elif not os.path.isfile(p):
900 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
912 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
901 else:
913 else:
902 if not wlock:
914 if not wlock:
903 wlock = self.wlock()
915 wlock = self.wlock()
904 if self.dirstate.state(dest) == '?':
916 if self.dirstate.state(dest) == '?':
905 self.dirstate.update([dest], "a")
917 self.dirstate.update([dest], "a")
906 self.dirstate.copy(source, dest)
918 self.dirstate.copy(source, dest)
907
919
908 def heads(self, start=None):
920 def heads(self, start=None):
909 heads = self.changelog.heads(start)
921 heads = self.changelog.heads(start)
910 # sort the output in rev descending order
922 # sort the output in rev descending order
911 heads = [(-self.changelog.rev(h), h) for h in heads]
923 heads = [(-self.changelog.rev(h), h) for h in heads]
912 heads.sort()
924 heads.sort()
913 return [n for (r, n) in heads]
925 return [n for (r, n) in heads]
914
926
915 # branchlookup returns a dict giving a list of branches for
927 # branchlookup returns a dict giving a list of branches for
916 # each head. A branch is defined as the tag of a node or
928 # each head. A branch is defined as the tag of a node or
917 # the branch of the node's parents. If a node has multiple
929 # the branch of the node's parents. If a node has multiple
918 # branch tags, tags are eliminated if they are visible from other
930 # branch tags, tags are eliminated if they are visible from other
919 # branch tags.
931 # branch tags.
920 #
932 #
921 # So, for this graph: a->b->c->d->e
933 # So, for this graph: a->b->c->d->e
922 # \ /
934 # \ /
923 # aa -----/
935 # aa -----/
924 # a has tag 2.6.12
936 # a has tag 2.6.12
925 # d has tag 2.6.13
937 # d has tag 2.6.13
926 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
938 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
927 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
939 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
928 # from the list.
940 # from the list.
929 #
941 #
930 # It is possible that more than one head will have the same branch tag.
942 # It is possible that more than one head will have the same branch tag.
931 # callers need to check the result for multiple heads under the same
943 # callers need to check the result for multiple heads under the same
932 # branch tag if that is a problem for them (ie checkout of a specific
944 # branch tag if that is a problem for them (ie checkout of a specific
933 # branch).
945 # branch).
934 #
946 #
935 # passing in a specific branch will limit the depth of the search
947 # passing in a specific branch will limit the depth of the search
936 # through the parents. It won't limit the branches returned in the
948 # through the parents. It won't limit the branches returned in the
937 # result though.
949 # result though.
938 def branchlookup(self, heads=None, branch=None):
950 def branchlookup(self, heads=None, branch=None):
939 if not heads:
951 if not heads:
940 heads = self.heads()
952 heads = self.heads()
941 headt = [ h for h in heads ]
953 headt = [ h for h in heads ]
942 chlog = self.changelog
954 chlog = self.changelog
943 branches = {}
955 branches = {}
944 merges = []
956 merges = []
945 seenmerge = {}
957 seenmerge = {}
946
958
947 # traverse the tree once for each head, recording in the branches
959 # traverse the tree once for each head, recording in the branches
948 # dict which tags are visible from this head. The branches
960 # dict which tags are visible from this head. The branches
949 # dict also records which tags are visible from each tag
961 # dict also records which tags are visible from each tag
950 # while we traverse.
962 # while we traverse.
951 while headt or merges:
963 while headt or merges:
952 if merges:
964 if merges:
953 n, found = merges.pop()
965 n, found = merges.pop()
954 visit = [n]
966 visit = [n]
955 else:
967 else:
956 h = headt.pop()
968 h = headt.pop()
957 visit = [h]
969 visit = [h]
958 found = [h]
970 found = [h]
959 seen = {}
971 seen = {}
960 while visit:
972 while visit:
961 n = visit.pop()
973 n = visit.pop()
962 if n in seen:
974 if n in seen:
963 continue
975 continue
964 pp = chlog.parents(n)
976 pp = chlog.parents(n)
965 tags = self.nodetags(n)
977 tags = self.nodetags(n)
966 if tags:
978 if tags:
967 for x in tags:
979 for x in tags:
968 if x == 'tip':
980 if x == 'tip':
969 continue
981 continue
970 for f in found:
982 for f in found:
971 branches.setdefault(f, {})[n] = 1
983 branches.setdefault(f, {})[n] = 1
972 branches.setdefault(n, {})[n] = 1
984 branches.setdefault(n, {})[n] = 1
973 break
985 break
974 if n not in found:
986 if n not in found:
975 found.append(n)
987 found.append(n)
976 if branch in tags:
988 if branch in tags:
977 continue
989 continue
978 seen[n] = 1
990 seen[n] = 1
979 if pp[1] != nullid and n not in seenmerge:
991 if pp[1] != nullid and n not in seenmerge:
980 merges.append((pp[1], [x for x in found]))
992 merges.append((pp[1], [x for x in found]))
981 seenmerge[n] = 1
993 seenmerge[n] = 1
982 if pp[0] != nullid:
994 if pp[0] != nullid:
983 visit.append(pp[0])
995 visit.append(pp[0])
984 # traverse the branches dict, eliminating branch tags from each
996 # traverse the branches dict, eliminating branch tags from each
985 # head that are visible from another branch tag for that head.
997 # head that are visible from another branch tag for that head.
986 out = {}
998 out = {}
987 viscache = {}
999 viscache = {}
988 for h in heads:
1000 for h in heads:
989 def visible(node):
1001 def visible(node):
990 if node in viscache:
1002 if node in viscache:
991 return viscache[node]
1003 return viscache[node]
992 ret = {}
1004 ret = {}
993 visit = [node]
1005 visit = [node]
994 while visit:
1006 while visit:
995 x = visit.pop()
1007 x = visit.pop()
996 if x in viscache:
1008 if x in viscache:
997 ret.update(viscache[x])
1009 ret.update(viscache[x])
998 elif x not in ret:
1010 elif x not in ret:
999 ret[x] = 1
1011 ret[x] = 1
1000 if x in branches:
1012 if x in branches:
1001 visit[len(visit):] = branches[x].keys()
1013 visit[len(visit):] = branches[x].keys()
1002 viscache[node] = ret
1014 viscache[node] = ret
1003 return ret
1015 return ret
1004 if h not in branches:
1016 if h not in branches:
1005 continue
1017 continue
1006 # O(n^2), but somewhat limited. This only searches the
1018 # O(n^2), but somewhat limited. This only searches the
1007 # tags visible from a specific head, not all the tags in the
1019 # tags visible from a specific head, not all the tags in the
1008 # whole repo.
1020 # whole repo.
1009 for b in branches[h]:
1021 for b in branches[h]:
1010 vis = False
1022 vis = False
1011 for bb in branches[h].keys():
1023 for bb in branches[h].keys():
1012 if b != bb:
1024 if b != bb:
1013 if b in visible(bb):
1025 if b in visible(bb):
1014 vis = True
1026 vis = True
1015 break
1027 break
1016 if not vis:
1028 if not vis:
1017 l = out.setdefault(h, [])
1029 l = out.setdefault(h, [])
1018 l[len(l):] = self.nodetags(b)
1030 l[len(l):] = self.nodetags(b)
1019 return out
1031 return out
1020
1032
1021 def branches(self, nodes):
1033 def branches(self, nodes):
1022 if not nodes:
1034 if not nodes:
1023 nodes = [self.changelog.tip()]
1035 nodes = [self.changelog.tip()]
1024 b = []
1036 b = []
1025 for n in nodes:
1037 for n in nodes:
1026 t = n
1038 t = n
1027 while 1:
1039 while 1:
1028 p = self.changelog.parents(n)
1040 p = self.changelog.parents(n)
1029 if p[1] != nullid or p[0] == nullid:
1041 if p[1] != nullid or p[0] == nullid:
1030 b.append((t, n, p[0], p[1]))
1042 b.append((t, n, p[0], p[1]))
1031 break
1043 break
1032 n = p[0]
1044 n = p[0]
1033 return b
1045 return b
1034
1046
1035 def between(self, pairs):
1047 def between(self, pairs):
1036 r = []
1048 r = []
1037
1049
1038 for top, bottom in pairs:
1050 for top, bottom in pairs:
1039 n, l, i = top, [], 0
1051 n, l, i = top, [], 0
1040 f = 1
1052 f = 1
1041
1053
1042 while n != bottom:
1054 while n != bottom:
1043 p = self.changelog.parents(n)[0]
1055 p = self.changelog.parents(n)[0]
1044 if i == f:
1056 if i == f:
1045 l.append(n)
1057 l.append(n)
1046 f = f * 2
1058 f = f * 2
1047 n = p
1059 n = p
1048 i += 1
1060 i += 1
1049
1061
1050 r.append(l)
1062 r.append(l)
1051
1063
1052 return r
1064 return r
1053
1065
1054 def findincoming(self, remote, base=None, heads=None, force=False):
1066 def findincoming(self, remote, base=None, heads=None, force=False):
1055 """Return list of roots of the subsets of missing nodes from remote
1067 """Return list of roots of the subsets of missing nodes from remote
1056
1068
1057 If base dict is specified, assume that these nodes and their parents
1069 If base dict is specified, assume that these nodes and their parents
1058 exist on the remote side and that no child of a node of base exists
1070 exist on the remote side and that no child of a node of base exists
1059 in both remote and self.
1071 in both remote and self.
1060 Furthermore base will be updated to include the nodes that exists
1072 Furthermore base will be updated to include the nodes that exists
1061 in self and remote but no children exists in self and remote.
1073 in self and remote but no children exists in self and remote.
1062 If a list of heads is specified, return only nodes which are heads
1074 If a list of heads is specified, return only nodes which are heads
1063 or ancestors of these heads.
1075 or ancestors of these heads.
1064
1076
1065 All the ancestors of base are in self and in remote.
1077 All the ancestors of base are in self and in remote.
1066 All the descendants of the list returned are missing in self.
1078 All the descendants of the list returned are missing in self.
1067 (and so we know that the rest of the nodes are missing in remote, see
1079 (and so we know that the rest of the nodes are missing in remote, see
1068 outgoing)
1080 outgoing)
1069 """
1081 """
1070 m = self.changelog.nodemap
1082 m = self.changelog.nodemap
1071 search = []
1083 search = []
1072 fetch = {}
1084 fetch = {}
1073 seen = {}
1085 seen = {}
1074 seenbranch = {}
1086 seenbranch = {}
1075 if base == None:
1087 if base == None:
1076 base = {}
1088 base = {}
1077
1089
1078 if not heads:
1090 if not heads:
1079 heads = remote.heads()
1091 heads = remote.heads()
1080
1092
1081 if self.changelog.tip() == nullid:
1093 if self.changelog.tip() == nullid:
1082 base[nullid] = 1
1094 base[nullid] = 1
1083 if heads != [nullid]:
1095 if heads != [nullid]:
1084 return [nullid]
1096 return [nullid]
1085 return []
1097 return []
1086
1098
1087 # assume we're closer to the tip than the root
1099 # assume we're closer to the tip than the root
1088 # and start by examining the heads
1100 # and start by examining the heads
1089 self.ui.status(_("searching for changes\n"))
1101 self.ui.status(_("searching for changes\n"))
1090
1102
1091 unknown = []
1103 unknown = []
1092 for h in heads:
1104 for h in heads:
1093 if h not in m:
1105 if h not in m:
1094 unknown.append(h)
1106 unknown.append(h)
1095 else:
1107 else:
1096 base[h] = 1
1108 base[h] = 1
1097
1109
1098 if not unknown:
1110 if not unknown:
1099 return []
1111 return []
1100
1112
1101 req = dict.fromkeys(unknown)
1113 req = dict.fromkeys(unknown)
1102 reqcnt = 0
1114 reqcnt = 0
1103
1115
1104 # search through remote branches
1116 # search through remote branches
1105 # a 'branch' here is a linear segment of history, with four parts:
1117 # a 'branch' here is a linear segment of history, with four parts:
1106 # head, root, first parent, second parent
1118 # head, root, first parent, second parent
1107 # (a branch always has two parents (or none) by definition)
1119 # (a branch always has two parents (or none) by definition)
1108 unknown = remote.branches(unknown)
1120 unknown = remote.branches(unknown)
1109 while unknown:
1121 while unknown:
1110 r = []
1122 r = []
1111 while unknown:
1123 while unknown:
1112 n = unknown.pop(0)
1124 n = unknown.pop(0)
1113 if n[0] in seen:
1125 if n[0] in seen:
1114 continue
1126 continue
1115
1127
1116 self.ui.debug(_("examining %s:%s\n")
1128 self.ui.debug(_("examining %s:%s\n")
1117 % (short(n[0]), short(n[1])))
1129 % (short(n[0]), short(n[1])))
1118 if n[0] == nullid: # found the end of the branch
1130 if n[0] == nullid: # found the end of the branch
1119 pass
1131 pass
1120 elif n in seenbranch:
1132 elif n in seenbranch:
1121 self.ui.debug(_("branch already found\n"))
1133 self.ui.debug(_("branch already found\n"))
1122 continue
1134 continue
1123 elif n[1] and n[1] in m: # do we know the base?
1135 elif n[1] and n[1] in m: # do we know the base?
1124 self.ui.debug(_("found incomplete branch %s:%s\n")
1136 self.ui.debug(_("found incomplete branch %s:%s\n")
1125 % (short(n[0]), short(n[1])))
1137 % (short(n[0]), short(n[1])))
1126 search.append(n) # schedule branch range for scanning
1138 search.append(n) # schedule branch range for scanning
1127 seenbranch[n] = 1
1139 seenbranch[n] = 1
1128 else:
1140 else:
1129 if n[1] not in seen and n[1] not in fetch:
1141 if n[1] not in seen and n[1] not in fetch:
1130 if n[2] in m and n[3] in m:
1142 if n[2] in m and n[3] in m:
1131 self.ui.debug(_("found new changeset %s\n") %
1143 self.ui.debug(_("found new changeset %s\n") %
1132 short(n[1]))
1144 short(n[1]))
1133 fetch[n[1]] = 1 # earliest unknown
1145 fetch[n[1]] = 1 # earliest unknown
1134 for p in n[2:4]:
1146 for p in n[2:4]:
1135 if p in m:
1147 if p in m:
1136 base[p] = 1 # latest known
1148 base[p] = 1 # latest known
1137
1149
1138 for p in n[2:4]:
1150 for p in n[2:4]:
1139 if p not in req and p not in m:
1151 if p not in req and p not in m:
1140 r.append(p)
1152 r.append(p)
1141 req[p] = 1
1153 req[p] = 1
1142 seen[n[0]] = 1
1154 seen[n[0]] = 1
1143
1155
1144 if r:
1156 if r:
1145 reqcnt += 1
1157 reqcnt += 1
1146 self.ui.debug(_("request %d: %s\n") %
1158 self.ui.debug(_("request %d: %s\n") %
1147 (reqcnt, " ".join(map(short, r))))
1159 (reqcnt, " ".join(map(short, r))))
1148 for p in xrange(0, len(r), 10):
1160 for p in xrange(0, len(r), 10):
1149 for b in remote.branches(r[p:p+10]):
1161 for b in remote.branches(r[p:p+10]):
1150 self.ui.debug(_("received %s:%s\n") %
1162 self.ui.debug(_("received %s:%s\n") %
1151 (short(b[0]), short(b[1])))
1163 (short(b[0]), short(b[1])))
1152 unknown.append(b)
1164 unknown.append(b)
1153
1165
1154 # do binary search on the branches we found
1166 # do binary search on the branches we found
1155 while search:
1167 while search:
1156 n = search.pop(0)
1168 n = search.pop(0)
1157 reqcnt += 1
1169 reqcnt += 1
1158 l = remote.between([(n[0], n[1])])[0]
1170 l = remote.between([(n[0], n[1])])[0]
1159 l.append(n[1])
1171 l.append(n[1])
1160 p = n[0]
1172 p = n[0]
1161 f = 1
1173 f = 1
1162 for i in l:
1174 for i in l:
1163 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1175 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1164 if i in m:
1176 if i in m:
1165 if f <= 2:
1177 if f <= 2:
1166 self.ui.debug(_("found new branch changeset %s\n") %
1178 self.ui.debug(_("found new branch changeset %s\n") %
1167 short(p))
1179 short(p))
1168 fetch[p] = 1
1180 fetch[p] = 1
1169 base[i] = 1
1181 base[i] = 1
1170 else:
1182 else:
1171 self.ui.debug(_("narrowed branch search to %s:%s\n")
1183 self.ui.debug(_("narrowed branch search to %s:%s\n")
1172 % (short(p), short(i)))
1184 % (short(p), short(i)))
1173 search.append((p, i))
1185 search.append((p, i))
1174 break
1186 break
1175 p, f = i, f * 2
1187 p, f = i, f * 2
1176
1188
1177 # sanity check our fetch list
1189 # sanity check our fetch list
1178 for f in fetch.keys():
1190 for f in fetch.keys():
1179 if f in m:
1191 if f in m:
1180 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1192 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1181
1193
1182 if base.keys() == [nullid]:
1194 if base.keys() == [nullid]:
1183 if force:
1195 if force:
1184 self.ui.warn(_("warning: repository is unrelated\n"))
1196 self.ui.warn(_("warning: repository is unrelated\n"))
1185 else:
1197 else:
1186 raise util.Abort(_("repository is unrelated"))
1198 raise util.Abort(_("repository is unrelated"))
1187
1199
1188 self.ui.debug(_("found new changesets starting at ") +
1200 self.ui.debug(_("found new changesets starting at ") +
1189 " ".join([short(f) for f in fetch]) + "\n")
1201 " ".join([short(f) for f in fetch]) + "\n")
1190
1202
1191 self.ui.debug(_("%d total queries\n") % reqcnt)
1203 self.ui.debug(_("%d total queries\n") % reqcnt)
1192
1204
1193 return fetch.keys()
1205 return fetch.keys()
1194
1206
1195 def findoutgoing(self, remote, base=None, heads=None, force=False):
1207 def findoutgoing(self, remote, base=None, heads=None, force=False):
1196 """Return list of nodes that are roots of subsets not in remote
1208 """Return list of nodes that are roots of subsets not in remote
1197
1209
1198 If base dict is specified, assume that these nodes and their parents
1210 If base dict is specified, assume that these nodes and their parents
1199 exist on the remote side.
1211 exist on the remote side.
1200 If a list of heads is specified, return only nodes which are heads
1212 If a list of heads is specified, return only nodes which are heads
1201 or ancestors of these heads, and return a second element which
1213 or ancestors of these heads, and return a second element which
1202 contains all remote heads which get new children.
1214 contains all remote heads which get new children.
1203 """
1215 """
1204 if base == None:
1216 if base == None:
1205 base = {}
1217 base = {}
1206 self.findincoming(remote, base, heads, force=force)
1218 self.findincoming(remote, base, heads, force=force)
1207
1219
1208 self.ui.debug(_("common changesets up to ")
1220 self.ui.debug(_("common changesets up to ")
1209 + " ".join(map(short, base.keys())) + "\n")
1221 + " ".join(map(short, base.keys())) + "\n")
1210
1222
1211 remain = dict.fromkeys(self.changelog.nodemap)
1223 remain = dict.fromkeys(self.changelog.nodemap)
1212
1224
1213 # prune everything remote has from the tree
1225 # prune everything remote has from the tree
1214 del remain[nullid]
1226 del remain[nullid]
1215 remove = base.keys()
1227 remove = base.keys()
1216 while remove:
1228 while remove:
1217 n = remove.pop(0)
1229 n = remove.pop(0)
1218 if n in remain:
1230 if n in remain:
1219 del remain[n]
1231 del remain[n]
1220 for p in self.changelog.parents(n):
1232 for p in self.changelog.parents(n):
1221 remove.append(p)
1233 remove.append(p)
1222
1234
1223 # find every node whose parents have been pruned
1235 # find every node whose parents have been pruned
1224 subset = []
1236 subset = []
1225 # find every remote head that will get new children
1237 # find every remote head that will get new children
1226 updated_heads = {}
1238 updated_heads = {}
1227 for n in remain:
1239 for n in remain:
1228 p1, p2 = self.changelog.parents(n)
1240 p1, p2 = self.changelog.parents(n)
1229 if p1 not in remain and p2 not in remain:
1241 if p1 not in remain and p2 not in remain:
1230 subset.append(n)
1242 subset.append(n)
1231 if heads:
1243 if heads:
1232 if p1 in heads:
1244 if p1 in heads:
1233 updated_heads[p1] = True
1245 updated_heads[p1] = True
1234 if p2 in heads:
1246 if p2 in heads:
1235 updated_heads[p2] = True
1247 updated_heads[p2] = True
1236
1248
1237 # this is the set of all roots we have to push
1249 # this is the set of all roots we have to push
1238 if heads:
1250 if heads:
1239 return subset, updated_heads.keys()
1251 return subset, updated_heads.keys()
1240 else:
1252 else:
1241 return subset
1253 return subset
1242
1254
1243 def pull(self, remote, heads=None, force=False, lock=None):
1255 def pull(self, remote, heads=None, force=False, lock=None):
1244 mylock = False
1256 mylock = False
1245 if not lock:
1257 if not lock:
1246 lock = self.lock()
1258 lock = self.lock()
1247 mylock = True
1259 mylock = True
1248
1260
1249 try:
1261 try:
1250 fetch = self.findincoming(remote, force=force)
1262 fetch = self.findincoming(remote, force=force)
1251 if fetch == [nullid]:
1263 if fetch == [nullid]:
1252 self.ui.status(_("requesting all changes\n"))
1264 self.ui.status(_("requesting all changes\n"))
1253
1265
1254 if not fetch:
1266 if not fetch:
1255 self.ui.status(_("no changes found\n"))
1267 self.ui.status(_("no changes found\n"))
1256 return 0
1268 return 0
1257
1269
1258 if heads is None:
1270 if heads is None:
1259 cg = remote.changegroup(fetch, 'pull')
1271 cg = remote.changegroup(fetch, 'pull')
1260 else:
1272 else:
1261 if 'changegroupsubset' not in remote.capabilities:
1273 if 'changegroupsubset' not in remote.capabilities:
1262 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1274 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1263 cg = remote.changegroupsubset(fetch, heads, 'pull')
1275 cg = remote.changegroupsubset(fetch, heads, 'pull')
1264 return self.addchangegroup(cg, 'pull', remote.url())
1276 return self.addchangegroup(cg, 'pull', remote.url())
1265 finally:
1277 finally:
1266 if mylock:
1278 if mylock:
1267 lock.release()
1279 lock.release()
1268
1280
1269 def push(self, remote, force=False, revs=None):
1281 def push(self, remote, force=False, revs=None):
1270 # there are two ways to push to remote repo:
1282 # there are two ways to push to remote repo:
1271 #
1283 #
1272 # addchangegroup assumes local user can lock remote
1284 # addchangegroup assumes local user can lock remote
1273 # repo (local filesystem, old ssh servers).
1285 # repo (local filesystem, old ssh servers).
1274 #
1286 #
1275 # unbundle assumes local user cannot lock remote repo (new ssh
1287 # unbundle assumes local user cannot lock remote repo (new ssh
1276 # servers, http servers).
1288 # servers, http servers).
1277
1289
1278 if remote.capable('unbundle'):
1290 if remote.capable('unbundle'):
1279 return self.push_unbundle(remote, force, revs)
1291 return self.push_unbundle(remote, force, revs)
1280 return self.push_addchangegroup(remote, force, revs)
1292 return self.push_addchangegroup(remote, force, revs)
1281
1293
1282 def prepush(self, remote, force, revs):
1294 def prepush(self, remote, force, revs):
1283 base = {}
1295 base = {}
1284 remote_heads = remote.heads()
1296 remote_heads = remote.heads()
1285 inc = self.findincoming(remote, base, remote_heads, force=force)
1297 inc = self.findincoming(remote, base, remote_heads, force=force)
1286 if not force and inc:
1298 if not force and inc:
1287 self.ui.warn(_("abort: unsynced remote changes!\n"))
1299 self.ui.warn(_("abort: unsynced remote changes!\n"))
1288 self.ui.status(_("(did you forget to sync?"
1300 self.ui.status(_("(did you forget to sync?"
1289 " use push -f to force)\n"))
1301 " use push -f to force)\n"))
1290 return None, 1
1302 return None, 1
1291
1303
1292 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1304 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1293 if revs is not None:
1305 if revs is not None:
1294 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1306 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1295 else:
1307 else:
1296 bases, heads = update, self.changelog.heads()
1308 bases, heads = update, self.changelog.heads()
1297
1309
1298 if not bases:
1310 if not bases:
1299 self.ui.status(_("no changes found\n"))
1311 self.ui.status(_("no changes found\n"))
1300 return None, 1
1312 return None, 1
1301 elif not force:
1313 elif not force:
1302 # FIXME we don't properly detect creation of new heads
1314 # FIXME we don't properly detect creation of new heads
1303 # in the push -r case, assume the user knows what he's doing
1315 # in the push -r case, assume the user knows what he's doing
1304 if not revs and len(remote_heads) < len(heads) \
1316 if not revs and len(remote_heads) < len(heads) \
1305 and remote_heads != [nullid]:
1317 and remote_heads != [nullid]:
1306 self.ui.warn(_("abort: push creates new remote branches!\n"))
1318 self.ui.warn(_("abort: push creates new remote branches!\n"))
1307 self.ui.status(_("(did you forget to merge?"
1319 self.ui.status(_("(did you forget to merge?"
1308 " use push -f to force)\n"))
1320 " use push -f to force)\n"))
1309 return None, 1
1321 return None, 1
1310
1322
1311 if revs is None:
1323 if revs is None:
1312 cg = self.changegroup(update, 'push')
1324 cg = self.changegroup(update, 'push')
1313 else:
1325 else:
1314 cg = self.changegroupsubset(update, revs, 'push')
1326 cg = self.changegroupsubset(update, revs, 'push')
1315 return cg, remote_heads
1327 return cg, remote_heads
1316
1328
1317 def push_addchangegroup(self, remote, force, revs):
1329 def push_addchangegroup(self, remote, force, revs):
1318 lock = remote.lock()
1330 lock = remote.lock()
1319
1331
1320 ret = self.prepush(remote, force, revs)
1332 ret = self.prepush(remote, force, revs)
1321 if ret[0] is not None:
1333 if ret[0] is not None:
1322 cg, remote_heads = ret
1334 cg, remote_heads = ret
1323 return remote.addchangegroup(cg, 'push', self.url())
1335 return remote.addchangegroup(cg, 'push', self.url())
1324 return ret[1]
1336 return ret[1]
1325
1337
1326 def push_unbundle(self, remote, force, revs):
1338 def push_unbundle(self, remote, force, revs):
1327 # local repo finds heads on server, finds out what revs it
1339 # local repo finds heads on server, finds out what revs it
1328 # must push. once revs transferred, if server finds it has
1340 # must push. once revs transferred, if server finds it has
1329 # different heads (someone else won commit/push race), server
1341 # different heads (someone else won commit/push race), server
1330 # aborts.
1342 # aborts.
1331
1343
1332 ret = self.prepush(remote, force, revs)
1344 ret = self.prepush(remote, force, revs)
1333 if ret[0] is not None:
1345 if ret[0] is not None:
1334 cg, remote_heads = ret
1346 cg, remote_heads = ret
1335 if force: remote_heads = ['force']
1347 if force: remote_heads = ['force']
1336 return remote.unbundle(cg, remote_heads, 'push')
1348 return remote.unbundle(cg, remote_heads, 'push')
1337 return ret[1]
1349 return ret[1]
1338
1350
1339 def changegroupinfo(self, nodes):
1351 def changegroupinfo(self, nodes):
1340 self.ui.note(_("%d changesets found\n") % len(nodes))
1352 self.ui.note(_("%d changesets found\n") % len(nodes))
1341 if self.ui.debugflag:
1353 if self.ui.debugflag:
1342 self.ui.debug(_("List of changesets:\n"))
1354 self.ui.debug(_("List of changesets:\n"))
1343 for node in nodes:
1355 for node in nodes:
1344 self.ui.debug("%s\n" % hex(node))
1356 self.ui.debug("%s\n" % hex(node))
1345
1357
1346 def changegroupsubset(self, bases, heads, source):
1358 def changegroupsubset(self, bases, heads, source):
1347 """This function generates a changegroup consisting of all the nodes
1359 """This function generates a changegroup consisting of all the nodes
1348 that are descendents of any of the bases, and ancestors of any of
1360 that are descendents of any of the bases, and ancestors of any of
1349 the heads.
1361 the heads.
1350
1362
1351 It is fairly complex as determining which filenodes and which
1363 It is fairly complex as determining which filenodes and which
1352 manifest nodes need to be included for the changeset to be complete
1364 manifest nodes need to be included for the changeset to be complete
1353 is non-trivial.
1365 is non-trivial.
1354
1366
1355 Another wrinkle is doing the reverse, figuring out which changeset in
1367 Another wrinkle is doing the reverse, figuring out which changeset in
1356 the changegroup a particular filenode or manifestnode belongs to."""
1368 the changegroup a particular filenode or manifestnode belongs to."""
1357
1369
1358 self.hook('preoutgoing', throw=True, source=source)
1370 self.hook('preoutgoing', throw=True, source=source)
1359
1371
1360 # Set up some initial variables
1372 # Set up some initial variables
1361 # Make it easy to refer to self.changelog
1373 # Make it easy to refer to self.changelog
1362 cl = self.changelog
1374 cl = self.changelog
1363 # msng is short for missing - compute the list of changesets in this
1375 # msng is short for missing - compute the list of changesets in this
1364 # changegroup.
1376 # changegroup.
1365 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1377 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1366 self.changegroupinfo(msng_cl_lst)
1378 self.changegroupinfo(msng_cl_lst)
1367 # Some bases may turn out to be superfluous, and some heads may be
1379 # Some bases may turn out to be superfluous, and some heads may be
1368 # too. nodesbetween will return the minimal set of bases and heads
1380 # too. nodesbetween will return the minimal set of bases and heads
1369 # necessary to re-create the changegroup.
1381 # necessary to re-create the changegroup.
1370
1382
1371 # Known heads are the list of heads that it is assumed the recipient
1383 # Known heads are the list of heads that it is assumed the recipient
1372 # of this changegroup will know about.
1384 # of this changegroup will know about.
1373 knownheads = {}
1385 knownheads = {}
1374 # We assume that all parents of bases are known heads.
1386 # We assume that all parents of bases are known heads.
1375 for n in bases:
1387 for n in bases:
1376 for p in cl.parents(n):
1388 for p in cl.parents(n):
1377 if p != nullid:
1389 if p != nullid:
1378 knownheads[p] = 1
1390 knownheads[p] = 1
1379 knownheads = knownheads.keys()
1391 knownheads = knownheads.keys()
1380 if knownheads:
1392 if knownheads:
1381 # Now that we know what heads are known, we can compute which
1393 # Now that we know what heads are known, we can compute which
1382 # changesets are known. The recipient must know about all
1394 # changesets are known. The recipient must know about all
1383 # changesets required to reach the known heads from the null
1395 # changesets required to reach the known heads from the null
1384 # changeset.
1396 # changeset.
1385 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1397 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1386 junk = None
1398 junk = None
1387 # Transform the list into an ersatz set.
1399 # Transform the list into an ersatz set.
1388 has_cl_set = dict.fromkeys(has_cl_set)
1400 has_cl_set = dict.fromkeys(has_cl_set)
1389 else:
1401 else:
1390 # If there were no known heads, the recipient cannot be assumed to
1402 # If there were no known heads, the recipient cannot be assumed to
1391 # know about any changesets.
1403 # know about any changesets.
1392 has_cl_set = {}
1404 has_cl_set = {}
1393
1405
1394 # Make it easy to refer to self.manifest
1406 # Make it easy to refer to self.manifest
1395 mnfst = self.manifest
1407 mnfst = self.manifest
1396 # We don't know which manifests are missing yet
1408 # We don't know which manifests are missing yet
1397 msng_mnfst_set = {}
1409 msng_mnfst_set = {}
1398 # Nor do we know which filenodes are missing.
1410 # Nor do we know which filenodes are missing.
1399 msng_filenode_set = {}
1411 msng_filenode_set = {}
1400
1412
1401 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1413 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1402 junk = None
1414 junk = None
1403
1415
1404 # A changeset always belongs to itself, so the changenode lookup
1416 # A changeset always belongs to itself, so the changenode lookup
1405 # function for a changenode is identity.
1417 # function for a changenode is identity.
1406 def identity(x):
1418 def identity(x):
1407 return x
1419 return x
1408
1420
1409 # A function generating function. Sets up an environment for the
1421 # A function generating function. Sets up an environment for the
1410 # inner function.
1422 # inner function.
1411 def cmp_by_rev_func(revlog):
1423 def cmp_by_rev_func(revlog):
1412 # Compare two nodes by their revision number in the environment's
1424 # Compare two nodes by their revision number in the environment's
1413 # revision history. Since the revision number both represents the
1425 # revision history. Since the revision number both represents the
1414 # most efficient order to read the nodes in, and represents a
1426 # most efficient order to read the nodes in, and represents a
1415 # topological sorting of the nodes, this function is often useful.
1427 # topological sorting of the nodes, this function is often useful.
1416 def cmp_by_rev(a, b):
1428 def cmp_by_rev(a, b):
1417 return cmp(revlog.rev(a), revlog.rev(b))
1429 return cmp(revlog.rev(a), revlog.rev(b))
1418 return cmp_by_rev
1430 return cmp_by_rev
1419
1431
1420 # If we determine that a particular file or manifest node must be a
1432 # If we determine that a particular file or manifest node must be a
1421 # node that the recipient of the changegroup will already have, we can
1433 # node that the recipient of the changegroup will already have, we can
1422 # also assume the recipient will have all the parents. This function
1434 # also assume the recipient will have all the parents. This function
1423 # prunes them from the set of missing nodes.
1435 # prunes them from the set of missing nodes.
1424 def prune_parents(revlog, hasset, msngset):
1436 def prune_parents(revlog, hasset, msngset):
1425 haslst = hasset.keys()
1437 haslst = hasset.keys()
1426 haslst.sort(cmp_by_rev_func(revlog))
1438 haslst.sort(cmp_by_rev_func(revlog))
1427 for node in haslst:
1439 for node in haslst:
1428 parentlst = [p for p in revlog.parents(node) if p != nullid]
1440 parentlst = [p for p in revlog.parents(node) if p != nullid]
1429 while parentlst:
1441 while parentlst:
1430 n = parentlst.pop()
1442 n = parentlst.pop()
1431 if n not in hasset:
1443 if n not in hasset:
1432 hasset[n] = 1
1444 hasset[n] = 1
1433 p = [p for p in revlog.parents(n) if p != nullid]
1445 p = [p for p in revlog.parents(n) if p != nullid]
1434 parentlst.extend(p)
1446 parentlst.extend(p)
1435 for n in hasset:
1447 for n in hasset:
1436 msngset.pop(n, None)
1448 msngset.pop(n, None)
1437
1449
1438 # This is a function generating function used to set up an environment
1450 # This is a function generating function used to set up an environment
1439 # for the inner function to execute in.
1451 # for the inner function to execute in.
1440 def manifest_and_file_collector(changedfileset):
1452 def manifest_and_file_collector(changedfileset):
1441 # This is an information gathering function that gathers
1453 # This is an information gathering function that gathers
1442 # information from each changeset node that goes out as part of
1454 # information from each changeset node that goes out as part of
1443 # the changegroup. The information gathered is a list of which
1455 # the changegroup. The information gathered is a list of which
1444 # manifest nodes are potentially required (the recipient may
1456 # manifest nodes are potentially required (the recipient may
1445 # already have them) and total list of all files which were
1457 # already have them) and total list of all files which were
1446 # changed in any changeset in the changegroup.
1458 # changed in any changeset in the changegroup.
1447 #
1459 #
1448 # We also remember the first changenode we saw any manifest
1460 # We also remember the first changenode we saw any manifest
1449 # referenced by so we can later determine which changenode 'owns'
1461 # referenced by so we can later determine which changenode 'owns'
1450 # the manifest.
1462 # the manifest.
1451 def collect_manifests_and_files(clnode):
1463 def collect_manifests_and_files(clnode):
1452 c = cl.read(clnode)
1464 c = cl.read(clnode)
1453 for f in c[3]:
1465 for f in c[3]:
1454 # This is to make sure we only have one instance of each
1466 # This is to make sure we only have one instance of each
1455 # filename string for each filename.
1467 # filename string for each filename.
1456 changedfileset.setdefault(f, f)
1468 changedfileset.setdefault(f, f)
1457 msng_mnfst_set.setdefault(c[0], clnode)
1469 msng_mnfst_set.setdefault(c[0], clnode)
1458 return collect_manifests_and_files
1470 return collect_manifests_and_files
1459
1471
1460 # Figure out which manifest nodes (of the ones we think might be part
1472 # Figure out which manifest nodes (of the ones we think might be part
1461 # of the changegroup) the recipient must know about and remove them
1473 # of the changegroup) the recipient must know about and remove them
1462 # from the changegroup.
1474 # from the changegroup.
1463 def prune_manifests():
1475 def prune_manifests():
1464 has_mnfst_set = {}
1476 has_mnfst_set = {}
1465 for n in msng_mnfst_set:
1477 for n in msng_mnfst_set:
1466 # If a 'missing' manifest thinks it belongs to a changenode
1478 # If a 'missing' manifest thinks it belongs to a changenode
1467 # the recipient is assumed to have, obviously the recipient
1479 # the recipient is assumed to have, obviously the recipient
1468 # must have that manifest.
1480 # must have that manifest.
1469 linknode = cl.node(mnfst.linkrev(n))
1481 linknode = cl.node(mnfst.linkrev(n))
1470 if linknode in has_cl_set:
1482 if linknode in has_cl_set:
1471 has_mnfst_set[n] = 1
1483 has_mnfst_set[n] = 1
1472 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1484 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1473
1485
1474 # Use the information collected in collect_manifests_and_files to say
1486 # Use the information collected in collect_manifests_and_files to say
1475 # which changenode any manifestnode belongs to.
1487 # which changenode any manifestnode belongs to.
1476 def lookup_manifest_link(mnfstnode):
1488 def lookup_manifest_link(mnfstnode):
1477 return msng_mnfst_set[mnfstnode]
1489 return msng_mnfst_set[mnfstnode]
1478
1490
1479 # A function generating function that sets up the initial environment
1491 # A function generating function that sets up the initial environment
1480 # the inner function.
1492 # the inner function.
1481 def filenode_collector(changedfiles):
1493 def filenode_collector(changedfiles):
1482 next_rev = [0]
1494 next_rev = [0]
1483 # This gathers information from each manifestnode included in the
1495 # This gathers information from each manifestnode included in the
1484 # changegroup about which filenodes the manifest node references
1496 # changegroup about which filenodes the manifest node references
1485 # so we can include those in the changegroup too.
1497 # so we can include those in the changegroup too.
1486 #
1498 #
1487 # It also remembers which changenode each filenode belongs to. It
1499 # It also remembers which changenode each filenode belongs to. It
1488 # does this by assuming the a filenode belongs to the changenode
1500 # does this by assuming the a filenode belongs to the changenode
1489 # the first manifest that references it belongs to.
1501 # the first manifest that references it belongs to.
1490 def collect_msng_filenodes(mnfstnode):
1502 def collect_msng_filenodes(mnfstnode):
1491 r = mnfst.rev(mnfstnode)
1503 r = mnfst.rev(mnfstnode)
1492 if r == next_rev[0]:
1504 if r == next_rev[0]:
1493 # If the last rev we looked at was the one just previous,
1505 # If the last rev we looked at was the one just previous,
1494 # we only need to see a diff.
1506 # we only need to see a diff.
1495 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1507 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1496 # For each line in the delta
1508 # For each line in the delta
1497 for dline in delta.splitlines():
1509 for dline in delta.splitlines():
1498 # get the filename and filenode for that line
1510 # get the filename and filenode for that line
1499 f, fnode = dline.split('\0')
1511 f, fnode = dline.split('\0')
1500 fnode = bin(fnode[:40])
1512 fnode = bin(fnode[:40])
1501 f = changedfiles.get(f, None)
1513 f = changedfiles.get(f, None)
1502 # And if the file is in the list of files we care
1514 # And if the file is in the list of files we care
1503 # about.
1515 # about.
1504 if f is not None:
1516 if f is not None:
1505 # Get the changenode this manifest belongs to
1517 # Get the changenode this manifest belongs to
1506 clnode = msng_mnfst_set[mnfstnode]
1518 clnode = msng_mnfst_set[mnfstnode]
1507 # Create the set of filenodes for the file if
1519 # Create the set of filenodes for the file if
1508 # there isn't one already.
1520 # there isn't one already.
1509 ndset = msng_filenode_set.setdefault(f, {})
1521 ndset = msng_filenode_set.setdefault(f, {})
1510 # And set the filenode's changelog node to the
1522 # And set the filenode's changelog node to the
1511 # manifest's if it hasn't been set already.
1523 # manifest's if it hasn't been set already.
1512 ndset.setdefault(fnode, clnode)
1524 ndset.setdefault(fnode, clnode)
1513 else:
1525 else:
1514 # Otherwise we need a full manifest.
1526 # Otherwise we need a full manifest.
1515 m = mnfst.read(mnfstnode)
1527 m = mnfst.read(mnfstnode)
1516 # For every file in we care about.
1528 # For every file in we care about.
1517 for f in changedfiles:
1529 for f in changedfiles:
1518 fnode = m.get(f, None)
1530 fnode = m.get(f, None)
1519 # If it's in the manifest
1531 # If it's in the manifest
1520 if fnode is not None:
1532 if fnode is not None:
1521 # See comments above.
1533 # See comments above.
1522 clnode = msng_mnfst_set[mnfstnode]
1534 clnode = msng_mnfst_set[mnfstnode]
1523 ndset = msng_filenode_set.setdefault(f, {})
1535 ndset = msng_filenode_set.setdefault(f, {})
1524 ndset.setdefault(fnode, clnode)
1536 ndset.setdefault(fnode, clnode)
1525 # Remember the revision we hope to see next.
1537 # Remember the revision we hope to see next.
1526 next_rev[0] = r + 1
1538 next_rev[0] = r + 1
1527 return collect_msng_filenodes
1539 return collect_msng_filenodes
1528
1540
1529 # We have a list of filenodes we think we need for a file, lets remove
1541 # We have a list of filenodes we think we need for a file, lets remove
1530 # all those we now the recipient must have.
1542 # all those we now the recipient must have.
1531 def prune_filenodes(f, filerevlog):
1543 def prune_filenodes(f, filerevlog):
1532 msngset = msng_filenode_set[f]
1544 msngset = msng_filenode_set[f]
1533 hasset = {}
1545 hasset = {}
1534 # If a 'missing' filenode thinks it belongs to a changenode we
1546 # If a 'missing' filenode thinks it belongs to a changenode we
1535 # assume the recipient must have, then the recipient must have
1547 # assume the recipient must have, then the recipient must have
1536 # that filenode.
1548 # that filenode.
1537 for n in msngset:
1549 for n in msngset:
1538 clnode = cl.node(filerevlog.linkrev(n))
1550 clnode = cl.node(filerevlog.linkrev(n))
1539 if clnode in has_cl_set:
1551 if clnode in has_cl_set:
1540 hasset[n] = 1
1552 hasset[n] = 1
1541 prune_parents(filerevlog, hasset, msngset)
1553 prune_parents(filerevlog, hasset, msngset)
1542
1554
1543 # A function generator function that sets up the a context for the
1555 # A function generator function that sets up the a context for the
1544 # inner function.
1556 # inner function.
1545 def lookup_filenode_link_func(fname):
1557 def lookup_filenode_link_func(fname):
1546 msngset = msng_filenode_set[fname]
1558 msngset = msng_filenode_set[fname]
1547 # Lookup the changenode the filenode belongs to.
1559 # Lookup the changenode the filenode belongs to.
1548 def lookup_filenode_link(fnode):
1560 def lookup_filenode_link(fnode):
1549 return msngset[fnode]
1561 return msngset[fnode]
1550 return lookup_filenode_link
1562 return lookup_filenode_link
1551
1563
1552 # Now that we have all theses utility functions to help out and
1564 # Now that we have all theses utility functions to help out and
1553 # logically divide up the task, generate the group.
1565 # logically divide up the task, generate the group.
1554 def gengroup():
1566 def gengroup():
1555 # The set of changed files starts empty.
1567 # The set of changed files starts empty.
1556 changedfiles = {}
1568 changedfiles = {}
1557 # Create a changenode group generator that will call our functions
1569 # Create a changenode group generator that will call our functions
1558 # back to lookup the owning changenode and collect information.
1570 # back to lookup the owning changenode and collect information.
1559 group = cl.group(msng_cl_lst, identity,
1571 group = cl.group(msng_cl_lst, identity,
1560 manifest_and_file_collector(changedfiles))
1572 manifest_and_file_collector(changedfiles))
1561 for chnk in group:
1573 for chnk in group:
1562 yield chnk
1574 yield chnk
1563
1575
1564 # The list of manifests has been collected by the generator
1576 # The list of manifests has been collected by the generator
1565 # calling our functions back.
1577 # calling our functions back.
1566 prune_manifests()
1578 prune_manifests()
1567 msng_mnfst_lst = msng_mnfst_set.keys()
1579 msng_mnfst_lst = msng_mnfst_set.keys()
1568 # Sort the manifestnodes by revision number.
1580 # Sort the manifestnodes by revision number.
1569 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1581 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1570 # Create a generator for the manifestnodes that calls our lookup
1582 # Create a generator for the manifestnodes that calls our lookup
1571 # and data collection functions back.
1583 # and data collection functions back.
1572 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1584 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1573 filenode_collector(changedfiles))
1585 filenode_collector(changedfiles))
1574 for chnk in group:
1586 for chnk in group:
1575 yield chnk
1587 yield chnk
1576
1588
1577 # These are no longer needed, dereference and toss the memory for
1589 # These are no longer needed, dereference and toss the memory for
1578 # them.
1590 # them.
1579 msng_mnfst_lst = None
1591 msng_mnfst_lst = None
1580 msng_mnfst_set.clear()
1592 msng_mnfst_set.clear()
1581
1593
1582 changedfiles = changedfiles.keys()
1594 changedfiles = changedfiles.keys()
1583 changedfiles.sort()
1595 changedfiles.sort()
1584 # Go through all our files in order sorted by name.
1596 # Go through all our files in order sorted by name.
1585 for fname in changedfiles:
1597 for fname in changedfiles:
1586 filerevlog = self.file(fname)
1598 filerevlog = self.file(fname)
1587 # Toss out the filenodes that the recipient isn't really
1599 # Toss out the filenodes that the recipient isn't really
1588 # missing.
1600 # missing.
1589 if msng_filenode_set.has_key(fname):
1601 if msng_filenode_set.has_key(fname):
1590 prune_filenodes(fname, filerevlog)
1602 prune_filenodes(fname, filerevlog)
1591 msng_filenode_lst = msng_filenode_set[fname].keys()
1603 msng_filenode_lst = msng_filenode_set[fname].keys()
1592 else:
1604 else:
1593 msng_filenode_lst = []
1605 msng_filenode_lst = []
1594 # If any filenodes are left, generate the group for them,
1606 # If any filenodes are left, generate the group for them,
1595 # otherwise don't bother.
1607 # otherwise don't bother.
1596 if len(msng_filenode_lst) > 0:
1608 if len(msng_filenode_lst) > 0:
1597 yield changegroup.genchunk(fname)
1609 yield changegroup.genchunk(fname)
1598 # Sort the filenodes by their revision #
1610 # Sort the filenodes by their revision #
1599 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1611 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1600 # Create a group generator and only pass in a changenode
1612 # Create a group generator and only pass in a changenode
1601 # lookup function as we need to collect no information
1613 # lookup function as we need to collect no information
1602 # from filenodes.
1614 # from filenodes.
1603 group = filerevlog.group(msng_filenode_lst,
1615 group = filerevlog.group(msng_filenode_lst,
1604 lookup_filenode_link_func(fname))
1616 lookup_filenode_link_func(fname))
1605 for chnk in group:
1617 for chnk in group:
1606 yield chnk
1618 yield chnk
1607 if msng_filenode_set.has_key(fname):
1619 if msng_filenode_set.has_key(fname):
1608 # Don't need this anymore, toss it to free memory.
1620 # Don't need this anymore, toss it to free memory.
1609 del msng_filenode_set[fname]
1621 del msng_filenode_set[fname]
1610 # Signal that no more groups are left.
1622 # Signal that no more groups are left.
1611 yield changegroup.closechunk()
1623 yield changegroup.closechunk()
1612
1624
1613 if msng_cl_lst:
1625 if msng_cl_lst:
1614 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1626 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1615
1627
1616 return util.chunkbuffer(gengroup())
1628 return util.chunkbuffer(gengroup())
1617
1629
1618 def changegroup(self, basenodes, source):
1630 def changegroup(self, basenodes, source):
1619 """Generate a changegroup of all nodes that we have that a recipient
1631 """Generate a changegroup of all nodes that we have that a recipient
1620 doesn't.
1632 doesn't.
1621
1633
1622 This is much easier than the previous function as we can assume that
1634 This is much easier than the previous function as we can assume that
1623 the recipient has any changenode we aren't sending them."""
1635 the recipient has any changenode we aren't sending them."""
1624
1636
1625 self.hook('preoutgoing', throw=True, source=source)
1637 self.hook('preoutgoing', throw=True, source=source)
1626
1638
1627 cl = self.changelog
1639 cl = self.changelog
1628 nodes = cl.nodesbetween(basenodes, None)[0]
1640 nodes = cl.nodesbetween(basenodes, None)[0]
1629 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1641 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1630 self.changegroupinfo(nodes)
1642 self.changegroupinfo(nodes)
1631
1643
1632 def identity(x):
1644 def identity(x):
1633 return x
1645 return x
1634
1646
1635 def gennodelst(revlog):
1647 def gennodelst(revlog):
1636 for r in xrange(0, revlog.count()):
1648 for r in xrange(0, revlog.count()):
1637 n = revlog.node(r)
1649 n = revlog.node(r)
1638 if revlog.linkrev(n) in revset:
1650 if revlog.linkrev(n) in revset:
1639 yield n
1651 yield n
1640
1652
1641 def changed_file_collector(changedfileset):
1653 def changed_file_collector(changedfileset):
1642 def collect_changed_files(clnode):
1654 def collect_changed_files(clnode):
1643 c = cl.read(clnode)
1655 c = cl.read(clnode)
1644 for fname in c[3]:
1656 for fname in c[3]:
1645 changedfileset[fname] = 1
1657 changedfileset[fname] = 1
1646 return collect_changed_files
1658 return collect_changed_files
1647
1659
1648 def lookuprevlink_func(revlog):
1660 def lookuprevlink_func(revlog):
1649 def lookuprevlink(n):
1661 def lookuprevlink(n):
1650 return cl.node(revlog.linkrev(n))
1662 return cl.node(revlog.linkrev(n))
1651 return lookuprevlink
1663 return lookuprevlink
1652
1664
1653 def gengroup():
1665 def gengroup():
1654 # construct a list of all changed files
1666 # construct a list of all changed files
1655 changedfiles = {}
1667 changedfiles = {}
1656
1668
1657 for chnk in cl.group(nodes, identity,
1669 for chnk in cl.group(nodes, identity,
1658 changed_file_collector(changedfiles)):
1670 changed_file_collector(changedfiles)):
1659 yield chnk
1671 yield chnk
1660 changedfiles = changedfiles.keys()
1672 changedfiles = changedfiles.keys()
1661 changedfiles.sort()
1673 changedfiles.sort()
1662
1674
1663 mnfst = self.manifest
1675 mnfst = self.manifest
1664 nodeiter = gennodelst(mnfst)
1676 nodeiter = gennodelst(mnfst)
1665 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1677 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1666 yield chnk
1678 yield chnk
1667
1679
1668 for fname in changedfiles:
1680 for fname in changedfiles:
1669 filerevlog = self.file(fname)
1681 filerevlog = self.file(fname)
1670 nodeiter = gennodelst(filerevlog)
1682 nodeiter = gennodelst(filerevlog)
1671 nodeiter = list(nodeiter)
1683 nodeiter = list(nodeiter)
1672 if nodeiter:
1684 if nodeiter:
1673 yield changegroup.genchunk(fname)
1685 yield changegroup.genchunk(fname)
1674 lookup = lookuprevlink_func(filerevlog)
1686 lookup = lookuprevlink_func(filerevlog)
1675 for chnk in filerevlog.group(nodeiter, lookup):
1687 for chnk in filerevlog.group(nodeiter, lookup):
1676 yield chnk
1688 yield chnk
1677
1689
1678 yield changegroup.closechunk()
1690 yield changegroup.closechunk()
1679
1691
1680 if nodes:
1692 if nodes:
1681 self.hook('outgoing', node=hex(nodes[0]), source=source)
1693 self.hook('outgoing', node=hex(nodes[0]), source=source)
1682
1694
1683 return util.chunkbuffer(gengroup())
1695 return util.chunkbuffer(gengroup())
1684
1696
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        source: stream of changegroup chunks
        srctype: origin of the group ('push', 'pull', ...), passed to hooks
        url: location of the peer, passed to hooks

        returns number of heads modified or added + 1."""

        def csmap(x):
            # addgroup lookup callback: log progress and return the
            # linkrev the incoming changeset will be assigned.
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            # Map a changelog node to its revision number; used as the
            # linkrev for incoming manifest and filelog entries.
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1  # last revision before the group
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1  # last revision after the group
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files: a sequence of (filename chunk, revision
            # group) pairs, terminated by an empty chunk
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # publish the appended changelog data to its real location
            cl.writedata()
        finally:
            # always discard the temporary append file, success or not
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # throw=True: a failing hook raises here, before the
            # transaction is closed below
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            # one 'incoming' hook invocation per added changeset
            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        return newheads - oldheads + 1
1782
1794
1783
1795
1784 def stream_in(self, remote):
1796 def stream_in(self, remote):
1785 fp = remote.stream_out()
1797 fp = remote.stream_out()
1786 resp = int(fp.readline())
1798 resp = int(fp.readline())
1787 if resp != 0:
1799 if resp != 0:
1788 raise util.Abort(_('operation forbidden by server'))
1800 raise util.Abort(_('operation forbidden by server'))
1789 self.ui.status(_('streaming all changes\n'))
1801 self.ui.status(_('streaming all changes\n'))
1790 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1802 total_files, total_bytes = map(int, fp.readline().split(' ', 1))
1791 self.ui.status(_('%d files to transfer, %s of data\n') %
1803 self.ui.status(_('%d files to transfer, %s of data\n') %
1792 (total_files, util.bytecount(total_bytes)))
1804 (total_files, util.bytecount(total_bytes)))
1793 start = time.time()
1805 start = time.time()
1794 for i in xrange(total_files):
1806 for i in xrange(total_files):
1795 name, size = fp.readline().split('\0', 1)
1807 name, size = fp.readline().split('\0', 1)
1796 size = int(size)
1808 size = int(size)
1797 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1809 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1798 ofp = self.sopener(name, 'w')
1810 ofp = self.sopener(name, 'w')
1799 for chunk in util.filechunkiter(fp, limit=size):
1811 for chunk in util.filechunkiter(fp, limit=size):
1800 ofp.write(chunk)
1812 ofp.write(chunk)
1801 ofp.close()
1813 ofp.close()
1802 elapsed = time.time() - start
1814 elapsed = time.time() - start
1803 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1815 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1804 (util.bytecount(total_bytes), elapsed,
1816 (util.bytecount(total_bytes), elapsed,
1805 util.bytecount(total_bytes / elapsed)))
1817 util.bytecount(total_bytes / elapsed)))
1806 self.reload()
1818 self.reload()
1807 return len(self.heads()) + 1
1819 return len(self.heads()) + 1
1808
1820
1809 def clone(self, remote, heads=[], stream=False):
1821 def clone(self, remote, heads=[], stream=False):
1810 '''clone remote repository.
1822 '''clone remote repository.
1811
1823
1812 keyword arguments:
1824 keyword arguments:
1813 heads: list of revs to clone (forces use of pull)
1825 heads: list of revs to clone (forces use of pull)
1814 stream: use streaming clone if possible'''
1826 stream: use streaming clone if possible'''
1815
1827
1816 # now, all clients that can request uncompressed clones can
1828 # now, all clients that can request uncompressed clones can
1817 # read repo formats supported by all servers that can serve
1829 # read repo formats supported by all servers that can serve
1818 # them.
1830 # them.
1819
1831
1820 # if revlog format changes, client will have to check version
1832 # if revlog format changes, client will have to check version
1821 # and format flags on "stream" capability, and use
1833 # and format flags on "stream" capability, and use
1822 # uncompressed only if compatible.
1834 # uncompressed only if compatible.
1823
1835
1824 if stream and not heads and remote.capable('stream'):
1836 if stream and not heads and remote.capable('stream'):
1825 return self.stream_in(remote)
1837 return self.stream_in(remote)
1826 return self.pull(remote, heads)
1838 return self.pull(remote, heads)
1827
1839
# Build the after-transaction callback without capturing the repository
# object itself, so no reference cycle keeps destructors from running.
def aftertrans(base):
    journal_dir = base

    def a():
        # Promote the journal files to undo files once the transaction
        # has completed successfully.
        util.rename(os.path.join(journal_dir, "journal"),
                    os.path.join(journal_dir, "undo"))
        util.rename(os.path.join(journal_dir, "journal.dirstate"),
                    os.path.join(journal_dir, "undo.dirstate"))
    return a
1836
1848
def instance(ui, path, create):
    """Open (or create) the local repository at the given file path."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1839
1851
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now