dirstate: fold statwalk and walk
Matt Mackall - r6755:f8299c84 default
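
This changeset folds the two dirstate walkers into one: the thin `walk` wrapper is deleted and `statwalk` takes over its name, so every caller now receives the `(src, filename, st)` tuples directly and must pass `unknown` and `ignored` explicitly. A minimal caller-side sketch of the change (hypothetical caller; `repo` and the match object `m` are assumed, with `m` coming from match.py):

    # before this changeset: two entry points, one wrapping the other
    for f in repo.dirstate.walk(m):                  # yielded filenames only
        print f
    for src, f, st in repo.dirstate.statwalk(m, unknown=True, ignored=False):
        print src, f

    # after: a single generator; unknown and ignored are now required
    for src, f, st in repo.dirstate.walk(m, True, False):
        print src, f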
--- a/mercurial/dirstate.py
+++ b/mercurial/dirstate.py
@@ -1,699 +1,693 @@
1 """
1 """
2 dirstate.py - working directory tracking for mercurial
2 dirstate.py - working directory tracking for mercurial
3
3
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5
5
6 This software may be used and distributed according to the terms
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
7 of the GNU General Public License, incorporated herein by reference.
8 """
8 """
9
9
10 from node import nullid
10 from node import nullid
11 from i18n import _
11 from i18n import _
12 import struct, os, bisect, stat, strutil, util, errno, ignore
12 import struct, os, bisect, stat, strutil, util, errno, ignore
13 import cStringIO, osutil, sys
13 import cStringIO, osutil, sys
14
14
15 _unknown = ('?', 0, 0, 0)
15 _unknown = ('?', 0, 0, 0)
16 _format = ">cllll"
16 _format = ">cllll"
17
17
18 class dirstate(object):
18 class dirstate(object):
19
19
20 def __init__(self, opener, ui, root):
20 def __init__(self, opener, ui, root):
21 self._opener = opener
21 self._opener = opener
22 self._root = root
22 self._root = root
23 self._dirty = False
23 self._dirty = False
24 self._dirtypl = False
24 self._dirtypl = False
25 self._ui = ui
25 self._ui = ui
26
26
27 def __getattr__(self, name):
27 def __getattr__(self, name):
28 if name == '_map':
28 if name == '_map':
29 self._read()
29 self._read()
30 return self._map
30 return self._map
31 elif name == '_copymap':
31 elif name == '_copymap':
32 self._read()
32 self._read()
33 return self._copymap
33 return self._copymap
34 elif name == '_foldmap':
34 elif name == '_foldmap':
35 _foldmap = {}
35 _foldmap = {}
36 for name in self._map:
36 for name in self._map:
37 norm = os.path.normcase(os.path.normpath(name))
37 norm = os.path.normcase(os.path.normpath(name))
38 _foldmap[norm] = name
38 _foldmap[norm] = name
39 self._foldmap = _foldmap
39 self._foldmap = _foldmap
40 return self._foldmap
40 return self._foldmap
41 elif name == '_branch':
41 elif name == '_branch':
42 try:
42 try:
43 self._branch = (self._opener("branch").read().strip()
43 self._branch = (self._opener("branch").read().strip()
44 or "default")
44 or "default")
45 except IOError:
45 except IOError:
46 self._branch = "default"
46 self._branch = "default"
47 return self._branch
47 return self._branch
48 elif name == '_pl':
48 elif name == '_pl':
49 self._pl = [nullid, nullid]
49 self._pl = [nullid, nullid]
50 try:
50 try:
51 st = self._opener("dirstate").read(40)
51 st = self._opener("dirstate").read(40)
52 if len(st) == 40:
52 if len(st) == 40:
53 self._pl = st[:20], st[20:40]
53 self._pl = st[:20], st[20:40]
54 except IOError, err:
54 except IOError, err:
55 if err.errno != errno.ENOENT: raise
55 if err.errno != errno.ENOENT: raise
56 return self._pl
56 return self._pl
57 elif name == '_dirs':
57 elif name == '_dirs':
58 self._dirs = {}
58 self._dirs = {}
59 for f in self._map:
59 for f in self._map:
60 if self[f] != 'r':
60 if self[f] != 'r':
61 self._incpath(f)
61 self._incpath(f)
62 return self._dirs
62 return self._dirs
63 elif name == '_ignore':
63 elif name == '_ignore':
64 files = [self._join('.hgignore')]
64 files = [self._join('.hgignore')]
65 for name, path in self._ui.configitems("ui"):
65 for name, path in self._ui.configitems("ui"):
66 if name == 'ignore' or name.startswith('ignore.'):
66 if name == 'ignore' or name.startswith('ignore.'):
67 files.append(os.path.expanduser(path))
67 files.append(os.path.expanduser(path))
68 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
68 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
69 return self._ignore
69 return self._ignore
70 elif name == '_slash':
70 elif name == '_slash':
71 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
71 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
72 return self._slash
72 return self._slash
73 elif name == '_checklink':
73 elif name == '_checklink':
74 self._checklink = util.checklink(self._root)
74 self._checklink = util.checklink(self._root)
75 return self._checklink
75 return self._checklink
76 elif name == '_checkexec':
76 elif name == '_checkexec':
77 self._checkexec = util.checkexec(self._root)
77 self._checkexec = util.checkexec(self._root)
78 return self._checkexec
78 return self._checkexec
79 elif name == '_checkcase':
79 elif name == '_checkcase':
80 self._checkcase = not util.checkcase(self._join('.hg'))
80 self._checkcase = not util.checkcase(self._join('.hg'))
81 return self._checkcase
81 return self._checkcase
82 elif name == 'normalize':
82 elif name == 'normalize':
83 if self._checkcase:
83 if self._checkcase:
84 self.normalize = self._normalize
84 self.normalize = self._normalize
85 else:
85 else:
86 self.normalize = lambda x: x
86 self.normalize = lambda x: x
87 return self.normalize
87 return self.normalize
88 else:
88 else:
89 raise AttributeError, name
89 raise AttributeError, name
90
90
91 def _join(self, f):
91 def _join(self, f):
92 return os.path.join(self._root, f)
92 return os.path.join(self._root, f)
93
93
94 def flagfunc(self, fallback):
94 def flagfunc(self, fallback):
95 if self._checklink:
95 if self._checklink:
96 if self._checkexec:
96 if self._checkexec:
97 def f(x):
97 def f(x):
98 p = os.path.join(self._root, x)
98 p = os.path.join(self._root, x)
99 if os.path.islink(p):
99 if os.path.islink(p):
100 return 'l'
100 return 'l'
101 if util.is_exec(p):
101 if util.is_exec(p):
102 return 'x'
102 return 'x'
103 return ''
103 return ''
104 return f
104 return f
105 def f(x):
105 def f(x):
106 if os.path.islink(os.path.join(self._root, x)):
106 if os.path.islink(os.path.join(self._root, x)):
107 return 'l'
107 return 'l'
108 if 'x' in fallback(x):
108 if 'x' in fallback(x):
109 return 'x'
109 return 'x'
110 return ''
110 return ''
111 return f
111 return f
112 if self._checkexec:
112 if self._checkexec:
113 def f(x):
113 def f(x):
114 if 'l' in fallback(x):
114 if 'l' in fallback(x):
115 return 'l'
115 return 'l'
116 if util.is_exec(os.path.join(self._root, x)):
116 if util.is_exec(os.path.join(self._root, x)):
117 return 'x'
117 return 'x'
118 return ''
118 return ''
119 return f
119 return f
120 return fallback
120 return fallback
121
121
122 def getcwd(self):
122 def getcwd(self):
123 cwd = os.getcwd()
123 cwd = os.getcwd()
124 if cwd == self._root: return ''
124 if cwd == self._root: return ''
125 # self._root ends with a path separator if self._root is '/' or 'C:\'
125 # self._root ends with a path separator if self._root is '/' or 'C:\'
126 rootsep = self._root
126 rootsep = self._root
127 if not util.endswithsep(rootsep):
127 if not util.endswithsep(rootsep):
128 rootsep += os.sep
128 rootsep += os.sep
129 if cwd.startswith(rootsep):
129 if cwd.startswith(rootsep):
130 return cwd[len(rootsep):]
130 return cwd[len(rootsep):]
131 else:
131 else:
132 # we're outside the repo. return an absolute path.
132 # we're outside the repo. return an absolute path.
133 return cwd
133 return cwd
134
134
135 def pathto(self, f, cwd=None):
135 def pathto(self, f, cwd=None):
136 if cwd is None:
136 if cwd is None:
137 cwd = self.getcwd()
137 cwd = self.getcwd()
138 path = util.pathto(self._root, cwd, f)
138 path = util.pathto(self._root, cwd, f)
139 if self._slash:
139 if self._slash:
140 return util.normpath(path)
140 return util.normpath(path)
141 return path
141 return path
142
142
143 def __getitem__(self, key):
143 def __getitem__(self, key):
144 ''' current states:
144 ''' current states:
145 n normal
145 n normal
146 m needs merging
146 m needs merging
147 r marked for removal
147 r marked for removal
148 a marked for addition
148 a marked for addition
149 ? not tracked'''
149 ? not tracked'''
150 return self._map.get(key, ("?",))[0]
150 return self._map.get(key, ("?",))[0]
151
151
152 def __contains__(self, key):
152 def __contains__(self, key):
153 return key in self._map
153 return key in self._map
154
154
155 def __iter__(self):
155 def __iter__(self):
156 a = self._map.keys()
156 a = self._map.keys()
157 a.sort()
157 a.sort()
158 for x in a:
158 for x in a:
159 yield x
159 yield x
160
160
161 def parents(self):
161 def parents(self):
162 return self._pl
162 return self._pl
163
163
164 def branch(self):
164 def branch(self):
165 return self._branch
165 return self._branch
166
166
167 def setparents(self, p1, p2=nullid):
167 def setparents(self, p1, p2=nullid):
168 self._dirty = self._dirtypl = True
168 self._dirty = self._dirtypl = True
169 self._pl = p1, p2
169 self._pl = p1, p2
170
170
171 def setbranch(self, branch):
171 def setbranch(self, branch):
172 self._branch = branch
172 self._branch = branch
173 self._opener("branch", "w").write(branch + '\n')
173 self._opener("branch", "w").write(branch + '\n')
174
174
175 def _read(self):
175 def _read(self):
176 self._map = {}
176 self._map = {}
177 self._copymap = {}
177 self._copymap = {}
178 if not self._dirtypl:
178 if not self._dirtypl:
179 self._pl = [nullid, nullid]
179 self._pl = [nullid, nullid]
180 try:
180 try:
181 st = self._opener("dirstate").read()
181 st = self._opener("dirstate").read()
182 except IOError, err:
182 except IOError, err:
183 if err.errno != errno.ENOENT: raise
183 if err.errno != errno.ENOENT: raise
184 return
184 return
185 if not st:
185 if not st:
186 return
186 return
187
187
188 if not self._dirtypl:
188 if not self._dirtypl:
189 self._pl = [st[:20], st[20: 40]]
189 self._pl = [st[:20], st[20: 40]]
190
190
191 # deref fields so they will be local in loop
191 # deref fields so they will be local in loop
192 dmap = self._map
192 dmap = self._map
193 copymap = self._copymap
193 copymap = self._copymap
194 unpack = struct.unpack
194 unpack = struct.unpack
195 e_size = struct.calcsize(_format)
195 e_size = struct.calcsize(_format)
196 pos1 = 40
196 pos1 = 40
197 l = len(st)
197 l = len(st)
198
198
199 # the inner loop
199 # the inner loop
200 while pos1 < l:
200 while pos1 < l:
201 pos2 = pos1 + e_size
201 pos2 = pos1 + e_size
202 e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
202 e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
203 pos1 = pos2 + e[4]
203 pos1 = pos2 + e[4]
204 f = st[pos2:pos1]
204 f = st[pos2:pos1]
205 if '\0' in f:
205 if '\0' in f:
206 f, c = f.split('\0')
206 f, c = f.split('\0')
207 copymap[f] = c
207 copymap[f] = c
208 dmap[f] = e # we hold onto e[4] because making a subtuple is slow
208 dmap[f] = e # we hold onto e[4] because making a subtuple is slow
209
209
210 def invalidate(self):
210 def invalidate(self):
211 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
211 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
212 if a in self.__dict__:
212 if a in self.__dict__:
213 delattr(self, a)
213 delattr(self, a)
214 self._dirty = False
214 self._dirty = False
215
215
216 def copy(self, source, dest):
216 def copy(self, source, dest):
217 if source == dest:
217 if source == dest:
218 return
218 return
219 self._dirty = True
219 self._dirty = True
220 self._copymap[dest] = source
220 self._copymap[dest] = source
221
221
222 def copied(self, file):
222 def copied(self, file):
223 return self._copymap.get(file, None)
223 return self._copymap.get(file, None)
224
224
225 def copies(self):
225 def copies(self):
226 return self._copymap
226 return self._copymap
227
227
228 def _incpath(self, path):
228 def _incpath(self, path):
229 c = path.rfind('/')
229 c = path.rfind('/')
230 if c >= 0:
230 if c >= 0:
231 dirs = self._dirs
231 dirs = self._dirs
232 base = path[:c]
232 base = path[:c]
233 if base not in dirs:
233 if base not in dirs:
234 self._incpath(base)
234 self._incpath(base)
235 dirs[base] = 1
235 dirs[base] = 1
236 else:
236 else:
237 dirs[base] += 1
237 dirs[base] += 1
238
238
239 def _decpath(self, path):
239 def _decpath(self, path):
240 c = path.rfind('/')
240 c = path.rfind('/')
241 if c >= 0:
241 if c >= 0:
242 base = path[:c]
242 base = path[:c]
243 dirs = self._dirs
243 dirs = self._dirs
244 if dirs[base] == 1:
244 if dirs[base] == 1:
245 del dirs[base]
245 del dirs[base]
246 self._decpath(base)
246 self._decpath(base)
247 else:
247 else:
248 dirs[base] -= 1
248 dirs[base] -= 1
249
249
250 def _incpathcheck(self, f):
250 def _incpathcheck(self, f):
251 if '\r' in f or '\n' in f:
251 if '\r' in f or '\n' in f:
252 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
252 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
253 % f)
253 % f)
254 # shadows
254 # shadows
255 if f in self._dirs:
255 if f in self._dirs:
256 raise util.Abort(_('directory %r already in dirstate') % f)
256 raise util.Abort(_('directory %r already in dirstate') % f)
257 for c in strutil.rfindall(f, '/'):
257 for c in strutil.rfindall(f, '/'):
258 d = f[:c]
258 d = f[:c]
259 if d in self._dirs:
259 if d in self._dirs:
260 break
260 break
261 if d in self._map and self[d] != 'r':
261 if d in self._map and self[d] != 'r':
262 raise util.Abort(_('file %r in dirstate clashes with %r') %
262 raise util.Abort(_('file %r in dirstate clashes with %r') %
263 (d, f))
263 (d, f))
264 self._incpath(f)
264 self._incpath(f)
265
265
266 def _changepath(self, f, newstate, relaxed=False):
266 def _changepath(self, f, newstate, relaxed=False):
267 # handle upcoming path changes
267 # handle upcoming path changes
268 oldstate = self[f]
268 oldstate = self[f]
269 if oldstate not in "?r" and newstate in "?r":
269 if oldstate not in "?r" and newstate in "?r":
270 if "_dirs" in self.__dict__:
270 if "_dirs" in self.__dict__:
271 self._decpath(f)
271 self._decpath(f)
272 return
272 return
273 if oldstate in "?r" and newstate not in "?r":
273 if oldstate in "?r" and newstate not in "?r":
274 if relaxed and oldstate == '?':
274 if relaxed and oldstate == '?':
275 # XXX
275 # XXX
276 # in relaxed mode we assume the caller knows
276 # in relaxed mode we assume the caller knows
277 # what it is doing, workaround for updating
277 # what it is doing, workaround for updating
278 # dir-to-file revisions
278 # dir-to-file revisions
279 if "_dirs" in self.__dict__:
279 if "_dirs" in self.__dict__:
280 self._incpath(f)
280 self._incpath(f)
281 return
281 return
282 self._incpathcheck(f)
282 self._incpathcheck(f)
283 return
283 return
284
284
285 def normal(self, f):
285 def normal(self, f):
286 'mark a file normal and clean'
286 'mark a file normal and clean'
287 self._dirty = True
287 self._dirty = True
288 self._changepath(f, 'n', True)
288 self._changepath(f, 'n', True)
289 s = os.lstat(self._join(f))
289 s = os.lstat(self._join(f))
290 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
290 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
291 if f in self._copymap:
291 if f in self._copymap:
292 del self._copymap[f]
292 del self._copymap[f]
293
293
294 def normallookup(self, f):
294 def normallookup(self, f):
295 'mark a file normal, but possibly dirty'
295 'mark a file normal, but possibly dirty'
296 if self._pl[1] != nullid and f in self._map:
296 if self._pl[1] != nullid and f in self._map:
297 # if there is a merge going on and the file was either
297 # if there is a merge going on and the file was either
298 # in state 'm' or dirty before being removed, restore that state.
298 # in state 'm' or dirty before being removed, restore that state.
299 entry = self._map[f]
299 entry = self._map[f]
300 if entry[0] == 'r' and entry[2] in (-1, -2):
300 if entry[0] == 'r' and entry[2] in (-1, -2):
301 source = self._copymap.get(f)
301 source = self._copymap.get(f)
302 if entry[2] == -1:
302 if entry[2] == -1:
303 self.merge(f)
303 self.merge(f)
304 elif entry[2] == -2:
304 elif entry[2] == -2:
305 self.normaldirty(f)
305 self.normaldirty(f)
306 if source:
306 if source:
307 self.copy(source, f)
307 self.copy(source, f)
308 return
308 return
309 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
309 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
310 return
310 return
311 self._dirty = True
311 self._dirty = True
312 self._changepath(f, 'n', True)
312 self._changepath(f, 'n', True)
313 self._map[f] = ('n', 0, -1, -1, 0)
313 self._map[f] = ('n', 0, -1, -1, 0)
314 if f in self._copymap:
314 if f in self._copymap:
315 del self._copymap[f]
315 del self._copymap[f]
316
316
317 def normaldirty(self, f):
317 def normaldirty(self, f):
318 'mark a file normal, but dirty'
318 'mark a file normal, but dirty'
319 self._dirty = True
319 self._dirty = True
320 self._changepath(f, 'n', True)
320 self._changepath(f, 'n', True)
321 self._map[f] = ('n', 0, -2, -1, 0)
321 self._map[f] = ('n', 0, -2, -1, 0)
322 if f in self._copymap:
322 if f in self._copymap:
323 del self._copymap[f]
323 del self._copymap[f]
324
324
325 def add(self, f):
325 def add(self, f):
326 'mark a file added'
326 'mark a file added'
327 self._dirty = True
327 self._dirty = True
328 self._changepath(f, 'a')
328 self._changepath(f, 'a')
329 self._map[f] = ('a', 0, -1, -1, 0)
329 self._map[f] = ('a', 0, -1, -1, 0)
330 if f in self._copymap:
330 if f in self._copymap:
331 del self._copymap[f]
331 del self._copymap[f]
332
332
333 def remove(self, f):
333 def remove(self, f):
334 'mark a file removed'
334 'mark a file removed'
335 self._dirty = True
335 self._dirty = True
336 self._changepath(f, 'r')
336 self._changepath(f, 'r')
337 size = 0
337 size = 0
338 if self._pl[1] != nullid and f in self._map:
338 if self._pl[1] != nullid and f in self._map:
339 entry = self._map[f]
339 entry = self._map[f]
340 if entry[0] == 'm':
340 if entry[0] == 'm':
341 size = -1
341 size = -1
342 elif entry[0] == 'n' and entry[2] == -2:
342 elif entry[0] == 'n' and entry[2] == -2:
343 size = -2
343 size = -2
344 self._map[f] = ('r', 0, size, 0, 0)
344 self._map[f] = ('r', 0, size, 0, 0)
345 if size == 0 and f in self._copymap:
345 if size == 0 and f in self._copymap:
346 del self._copymap[f]
346 del self._copymap[f]
347
347
348 def merge(self, f):
348 def merge(self, f):
349 'mark a file merged'
349 'mark a file merged'
350 self._dirty = True
350 self._dirty = True
351 s = os.lstat(self._join(f))
351 s = os.lstat(self._join(f))
352 self._changepath(f, 'm', True)
352 self._changepath(f, 'm', True)
353 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
353 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
354 if f in self._copymap:
354 if f in self._copymap:
355 del self._copymap[f]
355 del self._copymap[f]
356
356
357 def forget(self, f):
357 def forget(self, f):
358 'forget a file'
358 'forget a file'
359 self._dirty = True
359 self._dirty = True
360 try:
360 try:
361 self._changepath(f, '?')
361 self._changepath(f, '?')
362 del self._map[f]
362 del self._map[f]
363 except KeyError:
363 except KeyError:
364 self._ui.warn(_("not in dirstate: %s\n") % f)
364 self._ui.warn(_("not in dirstate: %s\n") % f)
365
365
366 def _normalize(self, path):
366 def _normalize(self, path):
367 normpath = os.path.normcase(os.path.normpath(path))
367 normpath = os.path.normcase(os.path.normpath(path))
368 if normpath in self._foldmap:
368 if normpath in self._foldmap:
369 return self._foldmap[normpath]
369 return self._foldmap[normpath]
370 elif os.path.exists(path):
370 elif os.path.exists(path):
371 self._foldmap[normpath] = util.fspath(path, self._root)
371 self._foldmap[normpath] = util.fspath(path, self._root)
372 return self._foldmap[normpath]
372 return self._foldmap[normpath]
373 else:
373 else:
374 return path
374 return path
375
375
376 def clear(self):
376 def clear(self):
377 self._map = {}
377 self._map = {}
378 if "_dirs" in self.__dict__:
378 if "_dirs" in self.__dict__:
379 delattr(self, "_dirs");
379 delattr(self, "_dirs");
380 self._copymap = {}
380 self._copymap = {}
381 self._pl = [nullid, nullid]
381 self._pl = [nullid, nullid]
382 self._dirty = True
382 self._dirty = True
383
383
384 def rebuild(self, parent, files):
384 def rebuild(self, parent, files):
385 self.clear()
385 self.clear()
386 for f in files:
386 for f in files:
387 if 'x' in files.flags(f):
387 if 'x' in files.flags(f):
388 self._map[f] = ('n', 0777, -1, 0, 0)
388 self._map[f] = ('n', 0777, -1, 0, 0)
389 else:
389 else:
390 self._map[f] = ('n', 0666, -1, 0, 0)
390 self._map[f] = ('n', 0666, -1, 0, 0)
391 self._pl = (parent, nullid)
391 self._pl = (parent, nullid)
392 self._dirty = True
392 self._dirty = True
393
393
394 def write(self):
394 def write(self):
395 if not self._dirty:
395 if not self._dirty:
396 return
396 return
397 st = self._opener("dirstate", "w", atomictemp=True)
397 st = self._opener("dirstate", "w", atomictemp=True)
398
398
399 try:
399 try:
400 gran = int(self._ui.config('dirstate', 'granularity', 1))
400 gran = int(self._ui.config('dirstate', 'granularity', 1))
401 except ValueError:
401 except ValueError:
402 gran = 1
402 gran = 1
403 limit = sys.maxint
403 limit = sys.maxint
404 if gran > 0:
404 if gran > 0:
405 limit = util.fstat(st).st_mtime - gran
405 limit = util.fstat(st).st_mtime - gran
406
406
407 cs = cStringIO.StringIO()
407 cs = cStringIO.StringIO()
408 copymap = self._copymap
408 copymap = self._copymap
409 pack = struct.pack
409 pack = struct.pack
410 write = cs.write
410 write = cs.write
411 write("".join(self._pl))
411 write("".join(self._pl))
412 for f, e in self._map.iteritems():
412 for f, e in self._map.iteritems():
413 if f in copymap:
413 if f in copymap:
414 f = "%s\0%s" % (f, copymap[f])
414 f = "%s\0%s" % (f, copymap[f])
415 if e[3] > limit and e[0] == 'n':
415 if e[3] > limit and e[0] == 'n':
416 e = (e[0], 0, -1, -1, 0)
416 e = (e[0], 0, -1, -1, 0)
417 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
417 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
418 write(e)
418 write(e)
419 write(f)
419 write(f)
420 st.write(cs.getvalue())
420 st.write(cs.getvalue())
421 st.rename()
421 st.rename()
422 self._dirty = self._dirtypl = False
422 self._dirty = self._dirtypl = False
423
423
424 def _filter(self, files):
424 def _filter(self, files):
425 ret = {}
425 ret = {}
426 unknown = []
426 unknown = []
427
427
428 for x in files:
428 for x in files:
429 if x == '.':
429 if x == '.':
430 return self._map.copy()
430 return self._map.copy()
431 if x not in self._map:
431 if x not in self._map:
432 unknown.append(x)
432 unknown.append(x)
433 else:
433 else:
434 ret[x] = self._map[x]
434 ret[x] = self._map[x]
435
435
436 if not unknown:
436 if not unknown:
437 return ret
437 return ret
438
438
439 b = self._map.keys()
439 b = self._map.keys()
440 b.sort()
440 b.sort()
441 blen = len(b)
441 blen = len(b)
442
442
443 for x in unknown:
443 for x in unknown:
444 bs = bisect.bisect(b, "%s%s" % (x, '/'))
444 bs = bisect.bisect(b, "%s%s" % (x, '/'))
445 while bs < blen:
445 while bs < blen:
446 s = b[bs]
446 s = b[bs]
447 if len(s) > len(x) and s.startswith(x):
447 if len(s) > len(x) and s.startswith(x):
448 ret[s] = self._map[s]
448 ret[s] = self._map[s]
449 else:
449 else:
450 break
450 break
451 bs += 1
451 bs += 1
452 return ret
452 return ret
453
453
454 def _supported(self, f, mode, verbose=False):
454 def _supported(self, f, mode, verbose=False):
455 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
455 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
456 return True
456 return True
457 if verbose:
457 if verbose:
458 kind = 'unknown'
458 kind = 'unknown'
459 if stat.S_ISCHR(mode): kind = _('character device')
459 if stat.S_ISCHR(mode): kind = _('character device')
460 elif stat.S_ISBLK(mode): kind = _('block device')
460 elif stat.S_ISBLK(mode): kind = _('block device')
461 elif stat.S_ISFIFO(mode): kind = _('fifo')
461 elif stat.S_ISFIFO(mode): kind = _('fifo')
462 elif stat.S_ISSOCK(mode): kind = _('socket')
462 elif stat.S_ISSOCK(mode): kind = _('socket')
463 elif stat.S_ISDIR(mode): kind = _('directory')
463 elif stat.S_ISDIR(mode): kind = _('directory')
464 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
464 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
465 % (self.pathto(f), kind))
465 % (self.pathto(f), kind))
466 return False
466 return False
467
467
468 def _dirignore(self, f):
468 def _dirignore(self, f):
469 if f == '.':
469 if f == '.':
470 return False
470 return False
471 if self._ignore(f):
471 if self._ignore(f):
472 return True
472 return True
473 for c in strutil.findall(f, '/'):
473 for c in strutil.findall(f, '/'):
474 if self._ignore(f[:c]):
474 if self._ignore(f[:c]):
475 return True
475 return True
476 return False
476 return False
477
477
478 def walk(self, match):
478 def walk(self, match, unknown, ignored):
479 # filter out the src and stat
480 for src, f, st in self.statwalk(match):
481 yield f
482
483 def statwalk(self, match, unknown=True, ignored=False):
484 '''
479 '''
485 walk recursively through the directory tree, finding all files
480 walk recursively through the directory tree, finding all files
486 matched by the match function
481 matched by the match function
487
482
488 results are yielded in a tuple (src, filename, st), where src
483 results are yielded in a tuple (src, filename, st), where src
489 is one of:
484 is one of:
490 'f' the file was found in the directory tree
485 'f' the file was found in the directory tree
491 'm' the file was only in the dirstate and not in the tree
486 'm' the file was only in the dirstate and not in the tree
492
487
493 and st is the stat result if the file was found in the directory.
488 and st is the stat result if the file was found in the directory.
494 '''
489 '''
495
490
496 def fwarn(f, msg):
491 def fwarn(f, msg):
497 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
492 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
498 return False
493 return False
499 badfn = fwarn
494 badfn = fwarn
500 if hasattr(match, 'bad'):
495 if hasattr(match, 'bad'):
501 badfn = match.bad
496 badfn = match.bad
502
497
503 # walk all files by default
498 # walk all files by default
504 files = match.files()
499 files = match.files()
505 if not files:
500 if not files:
506 files = ['.']
501 files = ['.']
507 dc = self._map.copy()
502 dc = self._map.copy()
508 else:
503 else:
509 files = util.unique(files)
504 files = util.unique(files)
510 dc = self._filter(files)
505 dc = self._filter(files)
511
506
512 def imatch(file_):
507 def imatch(file_):
513 if file_ not in dc and self._ignore(file_):
508 if file_ not in dc and self._ignore(file_):
514 return False
509 return False
515 return match(file_)
510 return match(file_)
516
511
517 # TODO: don't walk unknown directories if unknown and ignored are False
512 # TODO: don't walk unknown directories if unknown and ignored are False
518 ignore = self._ignore
513 ignore = self._ignore
519 dirignore = self._dirignore
514 dirignore = self._dirignore
520 if ignored:
515 if ignored:
521 imatch = match
516 imatch = match
522 ignore = util.never
517 ignore = util.never
523 dirignore = util.never
518 dirignore = util.never
524
519
525 # self._root may end with a path separator when self._root == '/'
520 # self._root may end with a path separator when self._root == '/'
526 common_prefix_len = len(self._root)
521 common_prefix_len = len(self._root)
527 if not util.endswithsep(self._root):
522 if not util.endswithsep(self._root):
528 common_prefix_len += 1
523 common_prefix_len += 1
529
524
530 normpath = util.normpath
525 normpath = util.normpath
531 listdir = osutil.listdir
526 listdir = osutil.listdir
532 lstat = os.lstat
527 lstat = os.lstat
533 bisect_left = bisect.bisect_left
528 bisect_left = bisect.bisect_left
534 isdir = os.path.isdir
529 isdir = os.path.isdir
535 pconvert = util.pconvert
530 pconvert = util.pconvert
536 join = os.path.join
531 join = os.path.join
537 s_isdir = stat.S_ISDIR
532 s_isdir = stat.S_ISDIR
538 supported = self._supported
533 supported = self._supported
539 _join = self._join
534 _join = self._join
540 known = {'.hg': 1}
535 known = {'.hg': 1}
541
536
542 # recursion free walker, faster than os.walk.
537 # recursion free walker, faster than os.walk.
543 def findfiles(s):
538 def findfiles(s):
544 work = [s]
539 work = [s]
545 wadd = work.append
540 wadd = work.append
546 found = []
541 found = []
547 add = found.append
542 add = found.append
548 if hasattr(match, 'dir'):
543 if hasattr(match, 'dir'):
549 match.dir(normpath(s[common_prefix_len:]))
544 match.dir(normpath(s[common_prefix_len:]))
550 while work:
545 while work:
551 top = work.pop()
546 top = work.pop()
552 entries = listdir(top, stat=True)
547 entries = listdir(top, stat=True)
553 # nd is the top of the repository dir tree
548 # nd is the top of the repository dir tree
554 nd = normpath(top[common_prefix_len:])
549 nd = normpath(top[common_prefix_len:])
555 if nd == '.':
550 if nd == '.':
556 nd = ''
551 nd = ''
557 else:
552 else:
558 # do not recurse into a repo contained in this
553 # do not recurse into a repo contained in this
559 # one. use bisect to find .hg directory so speed
554 # one. use bisect to find .hg directory so speed
560 # is good on big directory.
555 # is good on big directory.
561 names = [e[0] for e in entries]
556 names = [e[0] for e in entries]
562 hg = bisect_left(names, '.hg')
557 hg = bisect_left(names, '.hg')
563 if hg < len(names) and names[hg] == '.hg':
558 if hg < len(names) and names[hg] == '.hg':
564 if isdir(join(top, '.hg')):
559 if isdir(join(top, '.hg')):
565 continue
560 continue
566 for f, kind, st in entries:
561 for f, kind, st in entries:
567 np = pconvert(join(nd, f))
562 np = pconvert(join(nd, f))
568 if np in known:
563 if np in known:
569 continue
564 continue
570 known[np] = 1
565 known[np] = 1
571 p = join(top, f)
566 p = join(top, f)
572 # don't trip over symlinks
567 # don't trip over symlinks
573 if kind == stat.S_IFDIR:
568 if kind == stat.S_IFDIR:
574 if not ignore(np):
569 if not ignore(np):
575 wadd(p)
570 wadd(p)
576 if hasattr(match, 'dir'):
571 if hasattr(match, 'dir'):
577 match.dir(np)
572 match.dir(np)
578 if np in dc and match(np):
573 if np in dc and match(np):
579 add((np, 'm', st))
574 add((np, 'm', st))
580 elif imatch(np):
575 elif imatch(np):
581 if supported(np, st.st_mode):
576 if supported(np, st.st_mode):
582 add((np, 'f', st))
577 add((np, 'f', st))
583 elif np in dc:
578 elif np in dc:
584 add((np, 'm', st))
579 add((np, 'm', st))
585 found.sort()
580 found.sort()
586 return found
581 return found
587
582
588 # step one, find all files that match our criteria
583 # step one, find all files that match our criteria
589 files.sort()
584 files.sort()
590 for ff in files:
585 for ff in files:
591 nf = normpath(ff)
586 nf = normpath(ff)
592 f = _join(ff)
587 f = _join(ff)
593 try:
588 try:
594 st = lstat(f)
589 st = lstat(f)
595 except OSError, inst:
590 except OSError, inst:
596 found = False
591 found = False
597 for fn in dc:
592 for fn in dc:
598 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
593 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
599 found = True
594 found = True
600 break
595 break
601 if not found:
596 if not found:
602 if inst.errno != errno.ENOENT:
597 if inst.errno != errno.ENOENT:
603 fwarn(ff, inst.strerror)
598 fwarn(ff, inst.strerror)
604 elif badfn(ff, inst.strerror) and imatch(nf):
599 elif badfn(ff, inst.strerror) and imatch(nf):
605 yield 'f', ff, None
600 yield 'f', ff, None
606 continue
601 continue
607 if s_isdir(st.st_mode):
602 if s_isdir(st.st_mode):
608 if not dirignore(nf):
603 if not dirignore(nf):
609 for f, src, st in findfiles(f):
604 for f, src, st in findfiles(f):
610 yield src, f, st
605 yield src, f, st
611 else:
606 else:
612 if nf in known:
607 if nf in known:
613 continue
608 continue
614 known[nf] = 1
609 known[nf] = 1
615 if match(nf):
610 if match(nf):
616 if supported(ff, st.st_mode, verbose=True):
611 if supported(ff, st.st_mode, verbose=True):
617 yield 'f', self.normalize(nf), st
612 yield 'f', self.normalize(nf), st
618 elif ff in dc:
613 elif ff in dc:
619 yield 'm', nf, st
614 yield 'm', nf, st
620
615
621 # step two run through anything left in the dc hash and yield
616 # step two run through anything left in the dc hash and yield
622 # if we haven't already seen it
617 # if we haven't already seen it
623 ks = dc.keys()
618 ks = dc.keys()
624 ks.sort()
619 ks.sort()
625 for k in ks:
620 for k in ks:
626 if k in known:
621 if k in known:
627 continue
622 continue
628 known[k] = 1
623 known[k] = 1
629 if imatch(k):
624 if imatch(k):
630 yield 'm', k, None
625 yield 'm', k, None
631
626
632 def status(self, match, ignored, clean, unknown):
627 def status(self, match, ignored, clean, unknown):
633 listignored, listclean, listunknown = ignored, clean, unknown
628 listignored, listclean, listunknown = ignored, clean, unknown
634
635 lookup, modified, added, unknown, ignored = [], [], [], [], []
629 lookup, modified, added, unknown, ignored = [], [], [], [], []
636 removed, deleted, clean = [], [], []
630 removed, deleted, clean = [], [], []
637
631
638 _join = self._join
632 _join = self._join
639 lstat = os.lstat
633 lstat = os.lstat
640 cmap = self._copymap
634 cmap = self._copymap
641 dmap = self._map
635 dmap = self._map
642 ladd = lookup.append
636 ladd = lookup.append
643 madd = modified.append
637 madd = modified.append
644 aadd = added.append
638 aadd = added.append
645 uadd = unknown.append
639 uadd = unknown.append
646 iadd = ignored.append
640 iadd = ignored.append
647 radd = removed.append
641 radd = removed.append
648 dadd = deleted.append
642 dadd = deleted.append
649 cadd = clean.append
643 cadd = clean.append
650
644
651 for src, fn, st in self.statwalk(match, listunknown, listignored):
645 for src, fn, st in self.walk(match, listunknown, listignored):
652 if fn not in dmap:
646 if fn not in dmap:
653 if (listignored or match.exact(fn)) and self._dirignore(fn):
647 if (listignored or match.exact(fn)) and self._dirignore(fn):
654 if listignored:
648 if listignored:
655 iadd(fn)
649 iadd(fn)
656 elif listunknown:
650 elif listunknown:
657 uadd(fn)
651 uadd(fn)
658 continue
652 continue
659
653
660 state, mode, size, time, foo = dmap[fn]
654 state, mode, size, time, foo = dmap[fn]
661
655
662 if src == 'm':
656 if src == 'm':
663 nonexistent = True
657 nonexistent = True
664 if not st:
658 if not st:
665 try:
659 try:
666 st = lstat(_join(fn))
660 st = lstat(_join(fn))
667 except OSError, inst:
661 except OSError, inst:
668 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
662 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
669 raise
663 raise
670 st = None
664 st = None
671 # We need to re-check that it is a valid file
665 # We need to re-check that it is a valid file
672 if st and self._supported(fn, st.st_mode):
666 if st and self._supported(fn, st.st_mode):
673 nonexistent = False
667 nonexistent = False
674 if nonexistent and state in "nma":
668 if nonexistent and state in "nma":
675 dadd(fn)
669 dadd(fn)
676 continue
670 continue
677 # check the common case first
671 # check the common case first
678 if state == 'n':
672 if state == 'n':
679 if not st:
673 if not st:
680 st = lstat(_join(fn))
674 st = lstat(_join(fn))
681 if (size >= 0 and
675 if (size >= 0 and
682 (size != st.st_size
676 (size != st.st_size
683 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
677 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
684 or size == -2
678 or size == -2
685 or fn in self._copymap):
679 or fn in self._copymap):
686 madd(fn)
680 madd(fn)
687 elif time != int(st.st_mtime):
681 elif time != int(st.st_mtime):
688 ladd(fn)
682 ladd(fn)
689 elif listclean:
683 elif listclean:
690 cadd(fn)
684 cadd(fn)
691 elif state == 'm':
685 elif state == 'm':
692 madd(fn)
686 madd(fn)
693 elif state == 'a':
687 elif state == 'a':
694 aadd(fn)
688 aadd(fn)
695 elif state == 'r':
689 elif state == 'r':
696 radd(fn)
690 radd(fn)
697
691
698 return (lookup, modified, added, removed, deleted, unknown, ignored,
692 return (lookup, modified, added, removed, deleted, unknown, ignored,
699 clean)
693 clean)
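
The folded `walk` keeps statwalk's yield protocol from the docstring above: `src` is 'f' for a file found in the directory tree and 'm' for a file known only to the dirstate, and `st` may be None when no stat result is available. A small consumer sketch (hypothetical; assumes an existing `repo` and an all-matching match object `m`):

    for src, fn, st in repo.dirstate.walk(m, True, False):
        if src == 'm' and st is None:
            print '%s: in dirstate but not found on disk' % fn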
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2137 +1,2137 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui
12 import lock, transaction, stat, errno, ui
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, revlog, time, util, extensions, hook, inspect
14 import match as match_
14 import match as match_
15
15
16 class localrepository(repo.repository):
16 class localrepository(repo.repository):
17 capabilities = util.set(('lookup', 'changegroupsubset'))
17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 supported = ('revlogv1', 'store')
18 supported = ('revlogv1', 'store')
19
19
20 def __init__(self, parentui, path=None, create=0):
20 def __init__(self, parentui, path=None, create=0):
21 repo.repository.__init__(self)
21 repo.repository.__init__(self)
22 self.root = os.path.realpath(path)
22 self.root = os.path.realpath(path)
23 self.path = os.path.join(self.root, ".hg")
23 self.path = os.path.join(self.root, ".hg")
24 self.origroot = path
24 self.origroot = path
25 self.opener = util.opener(self.path)
25 self.opener = util.opener(self.path)
26 self.wopener = util.opener(self.root)
26 self.wopener = util.opener(self.root)
27
27
28 if not os.path.isdir(self.path):
28 if not os.path.isdir(self.path):
29 if create:
29 if create:
30 if not os.path.exists(path):
30 if not os.path.exists(path):
31 os.mkdir(path)
31 os.mkdir(path)
32 os.mkdir(self.path)
32 os.mkdir(self.path)
33 requirements = ["revlogv1"]
33 requirements = ["revlogv1"]
34 if parentui.configbool('format', 'usestore', True):
34 if parentui.configbool('format', 'usestore', True):
35 os.mkdir(os.path.join(self.path, "store"))
35 os.mkdir(os.path.join(self.path, "store"))
36 requirements.append("store")
36 requirements.append("store")
37 # create an invalid changelog
37 # create an invalid changelog
38 self.opener("00changelog.i", "a").write(
38 self.opener("00changelog.i", "a").write(
39 '\0\0\0\2' # represents revlogv2
39 '\0\0\0\2' # represents revlogv2
40 ' dummy changelog to prevent using the old repo layout'
40 ' dummy changelog to prevent using the old repo layout'
41 )
41 )
42 reqfile = self.opener("requires", "w")
42 reqfile = self.opener("requires", "w")
43 for r in requirements:
43 for r in requirements:
44 reqfile.write("%s\n" % r)
44 reqfile.write("%s\n" % r)
45 reqfile.close()
45 reqfile.close()
46 else:
46 else:
47 raise repo.RepoError(_("repository %s not found") % path)
47 raise repo.RepoError(_("repository %s not found") % path)
48 elif create:
48 elif create:
49 raise repo.RepoError(_("repository %s already exists") % path)
49 raise repo.RepoError(_("repository %s already exists") % path)
50 else:
50 else:
51 # find requirements
51 # find requirements
52 try:
52 try:
53 requirements = self.opener("requires").read().splitlines()
53 requirements = self.opener("requires").read().splitlines()
54 except IOError, inst:
54 except IOError, inst:
55 if inst.errno != errno.ENOENT:
55 if inst.errno != errno.ENOENT:
56 raise
56 raise
57 requirements = []
57 requirements = []
58 # check them
58 # check them
59 for r in requirements:
59 for r in requirements:
60 if r not in self.supported:
60 if r not in self.supported:
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62
62
63 # setup store
63 # setup store
64 if "store" in requirements:
64 if "store" in requirements:
65 self.encodefn = util.encodefilename
65 self.encodefn = util.encodefilename
66 self.decodefn = util.decodefilename
66 self.decodefn = util.decodefilename
67 self.spath = os.path.join(self.path, "store")
67 self.spath = os.path.join(self.path, "store")
68 else:
68 else:
69 self.encodefn = lambda x: x
69 self.encodefn = lambda x: x
70 self.decodefn = lambda x: x
70 self.decodefn = lambda x: x
71 self.spath = self.path
71 self.spath = self.path
72
72
73 try:
73 try:
74 # files in .hg/ will be created using this mode
74 # files in .hg/ will be created using this mode
75 mode = os.stat(self.spath).st_mode
75 mode = os.stat(self.spath).st_mode
76 # avoid some useless chmods
76 # avoid some useless chmods
77 if (0777 & ~util._umask) == (0777 & mode):
77 if (0777 & ~util._umask) == (0777 & mode):
78 mode = None
78 mode = None
79 except OSError:
79 except OSError:
80 mode = None
80 mode = None
81
81
82 self._createmode = mode
82 self._createmode = mode
83 self.opener.createmode = mode
83 self.opener.createmode = mode
84 sopener = util.opener(self.spath)
84 sopener = util.opener(self.spath)
85 sopener.createmode = mode
85 sopener.createmode = mode
86 self.sopener = util.encodedopener(sopener, self.encodefn)
86 self.sopener = util.encodedopener(sopener, self.encodefn)
87
87
88 self.ui = ui.ui(parentui=parentui)
88 self.ui = ui.ui(parentui=parentui)
89 try:
89 try:
90 self.ui.readconfig(self.join("hgrc"), self.root)
90 self.ui.readconfig(self.join("hgrc"), self.root)
91 extensions.loadall(self.ui)
91 extensions.loadall(self.ui)
92 except IOError:
92 except IOError:
93 pass
93 pass
94
94
95 self.tagscache = None
95 self.tagscache = None
96 self._tagstypecache = None
96 self._tagstypecache = None
97 self.branchcache = None
97 self.branchcache = None
98 self._ubranchcache = None # UTF-8 version of branchcache
98 self._ubranchcache = None # UTF-8 version of branchcache
99 self._branchcachetip = None
99 self._branchcachetip = None
100 self.nodetagscache = None
100 self.nodetagscache = None
101 self.filterpats = {}
101 self.filterpats = {}
102 self._datafilters = {}
102 self._datafilters = {}
103 self._transref = self._lockref = self._wlockref = None
103 self._transref = self._lockref = self._wlockref = None
104
104
105 def __getattr__(self, name):
105 def __getattr__(self, name):
106 if name == 'changelog':
106 if name == 'changelog':
107 self.changelog = changelog.changelog(self.sopener)
107 self.changelog = changelog.changelog(self.sopener)
108 self.sopener.defversion = self.changelog.version
108 self.sopener.defversion = self.changelog.version
109 return self.changelog
109 return self.changelog
110 if name == 'manifest':
110 if name == 'manifest':
111 self.changelog
111 self.changelog
112 self.manifest = manifest.manifest(self.sopener)
112 self.manifest = manifest.manifest(self.sopener)
113 return self.manifest
113 return self.manifest
114 if name == 'dirstate':
114 if name == 'dirstate':
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 return self.dirstate
116 return self.dirstate
117 else:
117 else:
118 raise AttributeError, name
118 raise AttributeError, name
119
119
120 def __getitem__(self, changeid):
120 def __getitem__(self, changeid):
121 if changeid == None:
121 if changeid == None:
122 return context.workingctx(self)
122 return context.workingctx(self)
123 return context.changectx(self, changeid)
123 return context.changectx(self, changeid)
124
124
125 def __nonzero__(self):
125 def __nonzero__(self):
126 return True
126 return True
127
127
128 def __len__(self):
128 def __len__(self):
129 return len(self.changelog)
129 return len(self.changelog)
130
130
131 def __iter__(self):
131 def __iter__(self):
132 for i in xrange(len(self)):
132 for i in xrange(len(self)):
133 yield i
133 yield i
134
134
135 def url(self):
135 def url(self):
136 return 'file:' + self.root
136 return 'file:' + self.root
137
137
138 def hook(self, name, throw=False, **args):
138 def hook(self, name, throw=False, **args):
139 return hook.hook(self.ui, self, name, throw, **args)
139 return hook.hook(self.ui, self, name, throw, **args)
140
140
141 tag_disallowed = ':\r\n'
141 tag_disallowed = ':\r\n'
142
142
143 def _tag(self, names, node, message, local, user, date, parent=None,
143 def _tag(self, names, node, message, local, user, date, parent=None,
144 extra={}):
144 extra={}):
145 use_dirstate = parent is None
145 use_dirstate = parent is None
146
146
147 if isinstance(names, str):
147 if isinstance(names, str):
148 allchars = names
148 allchars = names
149 names = (names,)
149 names = (names,)
150 else:
150 else:
151 allchars = ''.join(names)
151 allchars = ''.join(names)
152 for c in self.tag_disallowed:
152 for c in self.tag_disallowed:
153 if c in allchars:
153 if c in allchars:
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
154 raise util.Abort(_('%r cannot be used in a tag name') % c)
155
155
156 for name in names:
156 for name in names:
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
157 self.hook('pretag', throw=True, node=hex(node), tag=name,
158 local=local)
158 local=local)
159
159
160 def writetags(fp, names, munge, prevtags):
160 def writetags(fp, names, munge, prevtags):
161 fp.seek(0, 2)
161 fp.seek(0, 2)
162 if prevtags and prevtags[-1] != '\n':
162 if prevtags and prevtags[-1] != '\n':
163 fp.write('\n')
163 fp.write('\n')
164 for name in names:
164 for name in names:
165 m = munge and munge(name) or name
165 m = munge and munge(name) or name
166 if self._tagstypecache and name in self._tagstypecache:
166 if self._tagstypecache and name in self._tagstypecache:
167 old = self.tagscache.get(name, nullid)
167 old = self.tagscache.get(name, nullid)
168 fp.write('%s %s\n' % (hex(old), m))
168 fp.write('%s %s\n' % (hex(old), m))
169 fp.write('%s %s\n' % (hex(node), m))
169 fp.write('%s %s\n' % (hex(node), m))
170 fp.close()
170 fp.close()
171
171
172 prevtags = ''
172 prevtags = ''
173 if local:
173 if local:
174 try:
174 try:
175 fp = self.opener('localtags', 'r+')
175 fp = self.opener('localtags', 'r+')
176 except IOError, err:
176 except IOError, err:
177 fp = self.opener('localtags', 'a')
177 fp = self.opener('localtags', 'a')
178 else:
178 else:
179 prevtags = fp.read()
179 prevtags = fp.read()
180
180
181 # local tags are stored in the current charset
181 # local tags are stored in the current charset
182 writetags(fp, names, None, prevtags)
182 writetags(fp, names, None, prevtags)
183 for name in names:
183 for name in names:
184 self.hook('tag', node=hex(node), tag=name, local=local)
184 self.hook('tag', node=hex(node), tag=name, local=local)
185 return
185 return
186
186
187 if use_dirstate:
187 if use_dirstate:
188 try:
188 try:
189 fp = self.wfile('.hgtags', 'rb+')
189 fp = self.wfile('.hgtags', 'rb+')
190 except IOError, err:
190 except IOError, err:
191 fp = self.wfile('.hgtags', 'ab')
191 fp = self.wfile('.hgtags', 'ab')
192 else:
192 else:
193 prevtags = fp.read()
193 prevtags = fp.read()
194 else:
194 else:
195 try:
195 try:
196 prevtags = self.filectx('.hgtags', parent).data()
196 prevtags = self.filectx('.hgtags', parent).data()
197 except revlog.LookupError:
197 except revlog.LookupError:
198 pass
198 pass
199 fp = self.wfile('.hgtags', 'wb')
199 fp = self.wfile('.hgtags', 'wb')
200 if prevtags:
200 if prevtags:
201 fp.write(prevtags)
201 fp.write(prevtags)
202
202
203 # committed tags are stored in UTF-8
203 # committed tags are stored in UTF-8
204 writetags(fp, names, util.fromlocal, prevtags)
204 writetags(fp, names, util.fromlocal, prevtags)
205
205
206 if use_dirstate and '.hgtags' not in self.dirstate:
206 if use_dirstate and '.hgtags' not in self.dirstate:
207 self.add(['.hgtags'])
207 self.add(['.hgtags'])
208
208
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
210 extra=extra)
210 extra=extra)
211
211
212 for name in names:
212 for name in names:
213 self.hook('tag', node=hex(node), tag=name, local=local)
213 self.hook('tag', node=hex(node), tag=name, local=local)
214
214
215 return tagnode
215 return tagnode
216
216
217 def tag(self, names, node, message, local, user, date):
217 def tag(self, names, node, message, local, user, date):
218 '''tag a revision with one or more symbolic names.
218 '''tag a revision with one or more symbolic names.
219
219
220 names is a list of strings or, when adding a single tag, names may be a
220 names is a list of strings or, when adding a single tag, names may be a
221 string.
221 string.
222
222
223 if local is True, the tags are stored in a per-repository file.
223 if local is True, the tags are stored in a per-repository file.
224 otherwise, they are stored in the .hgtags file, and a new
224 otherwise, they are stored in the .hgtags file, and a new
225 changeset is committed with the change.
225 changeset is committed with the change.
226
226
227 keyword arguments:
227 keyword arguments:
228
228
229 local: whether to store tags in non-version-controlled file
229 local: whether to store tags in non-version-controlled file
230 (default False)
230 (default False)
231
231
232 message: commit message to use if committing
232 message: commit message to use if committing
233
233
234 user: name of user to use if committing
234 user: name of user to use if committing
235
235
236 date: date tuple to use if committing'''
236 date: date tuple to use if committing'''
237
237
238 for x in self.status()[:5]:
238 for x in self.status()[:5]:
239 if '.hgtags' in x:
239 if '.hgtags' in x:
240 raise util.Abort(_('working copy of .hgtags is changed '
240 raise util.Abort(_('working copy of .hgtags is changed '
241 '(please commit .hgtags manually)'))
241 '(please commit .hgtags manually)'))
242
242
243 self._tag(names, node, message, local, user, date)
243 self._tag(names, node, message, local, user, date)
244
244
245 def tags(self):
245 def tags(self):
246 '''return a mapping of tag to node'''
246 '''return a mapping of tag to node'''
247 if self.tagscache:
247 if self.tagscache:
248 return self.tagscache
248 return self.tagscache
249
249
250 globaltags = {}
250 globaltags = {}
251 tagtypes = {}
251 tagtypes = {}
252
252
253 def readtags(lines, fn, tagtype):
253 def readtags(lines, fn, tagtype):
254 filetags = {}
254 filetags = {}
255 count = 0
255 count = 0
256
256
257 def warn(msg):
257 def warn(msg):
258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
259
259
260 for l in lines:
260 for l in lines:
261 count += 1
261 count += 1
262 if not l:
262 if not l:
263 continue
263 continue
264 s = l.split(" ", 1)
264 s = l.split(" ", 1)
265 if len(s) != 2:
265 if len(s) != 2:
266 warn(_("cannot parse entry"))
266 warn(_("cannot parse entry"))
267 continue
267 continue
268 node, key = s
268 node, key = s
269 key = util.tolocal(key.strip()) # stored in UTF-8
269 key = util.tolocal(key.strip()) # stored in UTF-8
270 try:
270 try:
271 bin_n = bin(node)
271 bin_n = bin(node)
272 except TypeError:
272 except TypeError:
273 warn(_("node '%s' is not well formed") % node)
273 warn(_("node '%s' is not well formed") % node)
274 continue
274 continue
275 if bin_n not in self.changelog.nodemap:
275 if bin_n not in self.changelog.nodemap:
276 warn(_("tag '%s' refers to unknown node") % key)
276 warn(_("tag '%s' refers to unknown node") % key)
277 continue
277 continue
278
278
279 h = []
279 h = []
280 if key in filetags:
280 if key in filetags:
281 n, h = filetags[key]
281 n, h = filetags[key]
282 h.append(n)
282 h.append(n)
283 filetags[key] = (bin_n, h)
283 filetags[key] = (bin_n, h)
284
284
285 for k, nh in filetags.items():
285 for k, nh in filetags.items():
286 if k not in globaltags:
286 if k not in globaltags:
287 globaltags[k] = nh
287 globaltags[k] = nh
288 tagtypes[k] = tagtype
288 tagtypes[k] = tagtype
289 continue
289 continue
290
290
291 # we prefer the global tag if:
291 # we prefer the global tag if:
292 # it supercedes us OR
292 # it supercedes us OR
293 # mutual supercedes and it has a higher rank
293 # mutual supercedes and it has a higher rank
294 # otherwise we win because we're tip-most
294 # otherwise we win because we're tip-most
295 an, ah = nh
295 an, ah = nh
296 bn, bh = globaltags[k]
296 bn, bh = globaltags[k]
297 if (bn != an and an in bh and
297 if (bn != an and an in bh and
298 (bn not in ah or len(bh) > len(ah))):
298 (bn not in ah or len(bh) > len(ah))):
299 an = bn
299 an = bn
300 ah.extend([n for n in bh if n not in ah])
300 ah.extend([n for n in bh if n not in ah])
301 globaltags[k] = an, ah
301 globaltags[k] = an, ah
302 tagtypes[k] = tagtype
302 tagtypes[k] = tagtype
303
303
304 # read the tags file from each head, ending with the tip
304 # read the tags file from each head, ending with the tip
305 f = None
305 f = None
306 for rev, node, fnode in self._hgtagsnodes():
306 for rev, node, fnode in self._hgtagsnodes():
307 f = (f and f.filectx(fnode) or
307 f = (f and f.filectx(fnode) or
308 self.filectx('.hgtags', fileid=fnode))
308 self.filectx('.hgtags', fileid=fnode))
309 readtags(f.data().splitlines(), f, "global")
309 readtags(f.data().splitlines(), f, "global")
310
310
311 try:
311 try:
312 data = util.fromlocal(self.opener("localtags").read())
312 data = util.fromlocal(self.opener("localtags").read())
313 # localtags are stored in the local character set
313 # localtags are stored in the local character set
314 # while the internal tag table is stored in UTF-8
314 # while the internal tag table is stored in UTF-8
315 readtags(data.splitlines(), "localtags", "local")
315 readtags(data.splitlines(), "localtags", "local")
316 except IOError:
316 except IOError:
317 pass
317 pass
318
318
319 self.tagscache = {}
319 self.tagscache = {}
320 self._tagstypecache = {}
320 self._tagstypecache = {}
321 for k,nh in globaltags.items():
321 for k,nh in globaltags.items():
322 n = nh[0]
322 n = nh[0]
323 if n != nullid:
323 if n != nullid:
324 self.tagscache[k] = n
324 self.tagscache[k] = n
325 self._tagstypecache[k] = tagtypes[k]
325 self._tagstypecache[k] = tagtypes[k]
326 self.tagscache['tip'] = self.changelog.tip()
326 self.tagscache['tip'] = self.changelog.tip()
327 return self.tagscache
327 return self.tagscache
328
328
329 def tagtype(self, tagname):
329 def tagtype(self, tagname):
330 '''
330 '''
331 return the type of the given tag. result can be:
331 return the type of the given tag. result can be:
332
332
333 'local' : a local tag
333 'local' : a local tag
334 'global' : a global tag
334 'global' : a global tag
335 None : tag does not exist
335 None : tag does not exist
336 '''
336 '''
337
337
338 self.tags()
338 self.tags()
339
339
340 return self._tagstypecache.get(tagname)
340 return self._tagstypecache.get(tagname)
341
341
342 def _hgtagsnodes(self):
342 def _hgtagsnodes(self):
343 heads = self.heads()
343 heads = self.heads()
344 heads.reverse()
344 heads.reverse()
345 last = {}
345 last = {}
346 ret = []
346 ret = []
347 for node in heads:
347 for node in heads:
348 c = self[node]
348 c = self[node]
349 rev = c.rev()
349 rev = c.rev()
350 try:
350 try:
351 fnode = c.filenode('.hgtags')
351 fnode = c.filenode('.hgtags')
352 except revlog.LookupError:
352 except revlog.LookupError:
353 continue
353 continue
354 ret.append((rev, node, fnode))
354 ret.append((rev, node, fnode))
355 if fnode in last:
355 if fnode in last:
356 ret[last[fnode]] = None
356 ret[last[fnode]] = None
357 last[fnode] = len(ret) - 1
357 last[fnode] = len(ret) - 1
358 return [item for item in ret if item]
358 return [item for item in ret if item]
359
359
360 def tagslist(self):
360 def tagslist(self):
361 '''return a list of tags ordered by revision'''
361 '''return a list of tags ordered by revision'''
362 l = []
362 l = []
363 for t, n in self.tags().items():
363 for t, n in self.tags().items():
364 try:
364 try:
365 r = self.changelog.rev(n)
365 r = self.changelog.rev(n)
366 except:
366 except:
367 r = -2 # sort to the beginning of the list if unknown
367 r = -2 # sort to the beginning of the list if unknown
368 l.append((r, t, n))
368 l.append((r, t, n))
369 l.sort()
369 l.sort()
370 return [(t, n) for r, t, n in l]
370 return [(t, n) for r, t, n in l]
371
371
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

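    # For reference, the cache file written above has this shape (a sketch
    # reconstructed from _readbranchcache/_writebranchcache; the hashes are
    # abbreviated here, the real file stores full 40-char hex):
    #
    #   <tip-hex> <tip-rev>          e.g.  f8299c84... 6755
    #   <node-hex> <branch-label>    e.g.  1c28ed19... default
    #   <node-hex> <branch-label>    e.g.  a21d19d8... stable
    #
    # The first line lets _readbranchcache detect a stale cache; the
    # remaining lines map each branch label to its tip-most node.
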
    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

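    # lookup() tries the cheapest interpretations of 'key' first; roughly
    # (the example keys are illustrative):
    #
    #   repo.lookup('.')        # first working-directory parent
    #   repo.lookup('null')     # the null revision
    #   repo.lookup('42')       # rev number or full hash, via _match
    #   repo.lookup('v1.0')     # a tag name
    #   repo.lookup('default')  # a branch name, via branchtags()
    #   repo.lookup('f8299c8')  # unambiguous hash prefix, via _partialmatch
    #
    # Anything else raises RepoError("unknown revision ...").
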
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

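    # The patterns consulted above come from hgrc sections named after the
    # filter: "encode" filters run when reading from the working directory
    # (wread below), "decode" filters when writing to it (wwrite below).
    # A hedged example configuration (the filter commands are illustrative,
    # not built-in):
    #
    #   [encode]
    #   *.txt = dos2unix-pipe
    #
    #   [decode]
    #   *.txt = unix2dos-pipe
    #
    # If a command starts with a prefix registered via adddatafilter(), it
    # is dispatched to that Python function instead of util.filter's shell
    # pipeline.
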
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            cp = cp[0]
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1[cp])
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1[cp])
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2[cp])
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1[cp])
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock

    def commitctx(self, ctx):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            del lock, wlock

    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = wctx.modified() + wctx.added()
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            commit.sort()
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
                if use_dirstate:
                    for f in removed:
                        self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''

        if node:
            fdict = dict.fromkeys(match.files())
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield fn
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if match.bad(fn, 'No such file in rev ' + short(node)) \
                   and match(fn):
                    yield fn
        else:
            for src, fn, st in self.dirstate.walk(match, True, False):
                yield fn

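    # A hedged usage sketch ('m' is assumed to be a match object built by
    # the caller, e.g. via cmdutil):
    #
    #   for fn in repo.walk(m):            # walk the working directory
    #       ...
    #   for fn in repo.walk(m, node=n):    # walk the manifest of revision n
    #       ...
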
    def status(self, node1=None, node2=None, match=None,
               ignored=False, clean=False, unknown=True):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if not match:
            match = match_.always(self.root, self.getcwd())

        listignored, listclean, listunknown = ignored, clean, unknown
        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(match, listignored,
                                                    listclean, listunknown)
            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self['.']
                    ff = self.dirstate.flagfunc(ctx.flags)
                    for f in lookup:
                        if (f not in ctx or ff(f) != ctx.flags(f)
                            or ctx[f].cmp(self.wread(f))):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if listclean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                ff = self.dirstate.flagfunc(mf2.flags)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, ff(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

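    # The seven lists always come back in this order; a sketch of typical
    # unpacking at a call site:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(ignored=True, clean=True)
    #
    # Note that 'ignored' and 'clean' stay empty unless requested via the
    # corresponding keyword arguments, while 'unknown' is listed by default.
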
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn("%s not removed!\n" % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch=None, start=None):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch, ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

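    # A minimal standalone sketch of the head-finding walk above, on plain
    # integers (a hypothetical helper, not part of this class; 'parentrevs'
    # is assumed to map a rev to its pair of parent revs, -1 meaning null):
    #
    #   def findheads(parentrevs, inbranch, tiprev):
    #       heads = [tiprev]
    #       ancestors = set(parentrevs(tiprev))
    #       for rev in xrange(tiprev - 1, -1, -1):
    #           if rev in ancestors:      # known non-head: replace by parents
    #               ancestors.update(parentrevs(rev))
    #               ancestors.remove(rev)
    #           elif inbranch(rev):       # in branch, not under any head: head
    #               heads.append(rev)
    #               ancestors.update(parentrevs(rev))
    #       return heads
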
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

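    # Because 'f' doubles each time the test 'i == f' fires, the nodes kept
    # for each (top, bottom) pair sit at distances 1, 2, 4, 8, ... from
    # 'top' along the first-parent chain. E.g. for a 10-step chain the
    # sampled distances are:
    #
    #   [i for i in [1, 2, 4, 8, 16] if i < 10]   # -> [1, 2, 4, 8]
    #
    # This exponentially-spaced sample is what lets findincoming below
    # narrow a branch range with O(log n) round trips.
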
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") %
                              (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

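    # A hedged sketch of the discovery conversation findincoming drives,
    # assuming hypothetical `repo` (local) and `remote` (peer) objects.
    # remote.branches() describes each unknown head as a linear segment
    # (head, root, p1, p2); remote.between() bisects a (head, root) pair:
    #
    #   unknown = [h for h in remote.heads()
    #              if h not in repo.changelog.nodemap]
    #   for head, root, p1, p2 in remote.branches(unknown):
    #       ...  # if p1/p2 are known locally, root is an earliest-unknown
    #       ...  # node; otherwise between() narrows the known/unknown
    #       ...  # boundary, roughly halving the range per round trip.
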
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

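    # A minimal usage sketch mirroring prepush below (hypothetical `repo`
    # and `remote` objects): run findincoming first to fill `base` with
    # the common changesets, then hand the same dict to findoutgoing so
    # discovery is not repeated:
    #
    #   base = {}
    #   repo.findincoming(remote, base, remote.heads())
    #   roots = repo.findoutgoing(remote, base)
    #   # roots: the roots of every changeset the remote lacks.
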
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

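    # Usage sketch (hypothetical `repo`/`remote` objects; `node` a binary
    # changelog node).  A partial pull needs the remote to advertise the
    # changegroupsubset capability:
    #
    #   repo.pull(remote)                  # pull everything missing
    #   repo.pull(remote, heads=[node])    # pull ancestors of node only
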
    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

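    # Usage sketch (hypothetical `repo`/`remote` objects; `node` a binary
    # node).  The transport is picked from the peer's capabilities:
    #
    #   repo.push(remote)                  # push all outgoing changesets
    #   repo.push(remote, revs=[node])     # push ancestors of node only
    #   repo.push(remote, force=True)      # permit new remote heads
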
    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

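    # prepush returns (changegroup, remote_heads) when there is something
    # to send, or (None, status) otherwise.  A hedged caller sketch,
    # mirroring push_addchangegroup below:
    #
    #   ret = repo.prepush(remote, False, None)
    #   if ret[0] is None:
    #       result = ret[1]   # 1: no changes found, 0: aborted (new heads)
    #   else:
    #       cg, remote_heads = ret
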
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and a total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file; let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

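    # Shape of the extranodes argument described in the docstring above
    # (a hypothetical example; mnode/fnode/clnode stand for binary
    # manifest, filelog and changelog nodes):
    #
    #   extranodes = {
    #       1: [(mnode, clnode)],               # extra manifest nodes
    #       'foo/bar.txt': [(fnode, clnode)],   # extra filelog nodes
    #   }
    #   cg = repo.changegroupsubset(bases, heads, 'push',
    #                               extranodes=extranodes)
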
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

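    # The chunkbuffer returned above exposes a file-like read(); a hedged
    # sketch of spooling a raw (headerless, uncompressed) changegroup to
    # disk, assuming `node` is a binary base node and that read() accepts
    # a byte count:
    #
    #   cg = repo.changegroup([node], 'bundle')
    #   out = open('/tmp/example.cg', 'wb')
    #   while True:
    #       chunk = cg.read(4096)
    #       if not chunk:
    #           break
    #       out.write(chunk)
    #   out.close()
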
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)),
                          source=srctype, url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

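    # Decoding the return value per the docstring above (a sketch; `cg`
    # is assumed to come from remote.changegroup or changegroupsubset):
    #
    #   ret = repo.addchangegroup(cg, 'pull', remote.url())
    #   if ret == 0:
    #       pass                  # nothing changed
    #   elif ret > 1:
    #       added = ret - 1       # head count grew by `added`
    #   elif ret < 0:
    #       removed = -ret - 1    # head count shrank by `removed`
    #   # ret == 1: changesets added, head count unchanged
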
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

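    # The stream_out wire format parsed above, sketched: one status line,
    # one "<total files> <total bytes>" line, then per file a name and
    # size separated by NUL, followed by exactly that many raw bytes:
    #
    #   0
    #   2 8192
    #   data/foo.i\x004096
    #   <4096 raw bytes of revlog data>
    #   data/bar.i\x004096
    #   <4096 raw bytes of revlog data>
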
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

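    # Usage sketch (hypothetical `repo`/`remote` objects; `node` a binary
    # node): streaming is only attempted for a full clone from a
    # stream-capable server; a heads argument forces the pull path:
    #
    #   repo.clone(remote, stream=True)     # stream when possible
    #   repo.clone(remote, heads=[node])    # always pulls
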
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

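# A hedged sketch of how aftertrans is intended to be used: it builds a
# callback for a transaction to run on close, renaming journal files to
# their undo counterparts (names and the transaction signature here are
# illustrative only):
#
#   renames = [('journal', 'undo'), ('journal.branch', 'undo.branch')]
#   tr = transaction.transaction(ui.warn, opener, 'journal',
#                                after=aftertrans(renames))
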
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True