dirstate: add filecache support
Idan Kamara
r16200:9d4a2942 stable
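
This changeset gives dirstate the `_filecache` dict that the `scmutil.filecache` decorator expects to find on its host object, so file-backed properties (such as localrepo's `@filecache('dirstate')` below) can be recomputed when the backing file under .hg/ changes on disk. As a rough illustration of the pattern, here is a minimal sketch with illustrative names only, not the actual scmutil.filecache implementation:

import os

class filecacheentry(object):
    """Remember a file's stat so we can tell when it changes."""
    def __init__(self, path):
        self.path = path
        self.cachestat = self._stat(path)

    @staticmethod
    def _stat(path):
        try:
            st = os.stat(path)
            return (st.st_mtime, st.st_size, st.st_ino)
        except OSError:
            return None

    def changed(self):
        # has the file been rewritten or replaced since we cached it?
        return self._stat(self.path) != self.cachestat

class filecache(object):
    """Descriptor caching a computed value until the backing file
    changes on disk (sketch only)."""
    def __init__(self, fname):
        self.fname = fname

    def join(self, obj, fname):
        # subclasses decide where the file lives (cf. storecache below)
        return os.path.join(obj.path, fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, objtype=None):
        path = self.join(obj, self.fname)
        # relies on the host object defining _filecache = {}, which is
        # precisely what this commit adds to dirstate's __init__
        entry = obj._filecache.get(self.name)
        if entry is None or entry[0].changed():
            entry = (filecacheentry(path), self.func(obj))
            obj._filecache[self.name] = entry
        return entry[1]

A subclass only has to override join() to relocate the backing file; that is exactly what localrepo's storecache does for files kept in the store.
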
@@ -1,736 +1,737 @@
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import errno

from node import nullid
from i18n import _
import scmutil, util, ignore, osutil, parsers, encoding
import struct, os, stat, errno
import cStringIO

_format = ">cllll"
propertycache = util.propertycache

def _finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

def _incdirs(dirs, path):
    for base in _finddirs(path):
        if base in dirs:
            dirs[base] += 1
            return
        dirs[base] = 1

def _decdirs(dirs, path):
    for base in _finddirs(path):
        if dirs[base] > 1:
            dirs[base] -= 1
            return
        del dirs[base]

class dirstate(object):

    def __init__(self, opener, ui, root, validate):
        '''Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        '''
        self._opener = opener
        self._validate = validate
        self._root = root
        self._rootdir = os.path.join(root, '')
        self._dirty = False
        self._dirtypl = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}

    @propertycache
    def _map(self):
        '''Return the dirstate contents as a map from filename to
        (state, mode, size, time).'''
        self._read()
        return self._map

    @propertycache
    def _copymap(self):
        self._read()
        return self._copymap

    @propertycache
    def _normroot(self):
        return util.normcase(self._root)

    @propertycache
    def _foldmap(self):
        f = {}
        for name in self._map:
            f[util.normcase(name)] = name
        f['.'] = '.' # prevents useless util.fspath() invocation
        return f

    @propertycache
    def _branch(self):
        try:
            return self._opener.read("branch").strip() or "default"
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            return "default"

    @propertycache
    def _pl(self):
        try:
            fp = self._opener("dirstate")
            st = fp.read(40)
            fp.close()
            l = len(st)
            if l == 40:
                return st[:20], st[20:40]
            elif l > 0 and l < 40:
                raise util.Abort(_('working directory state appears damaged!'))
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
        return [nullid, nullid]

    @propertycache
    def _dirs(self):
        dirs = {}
        for f, s in self._map.iteritems():
            if s[0] != 'r':
                _incdirs(dirs, f)
        return dirs

    def dirs(self):
        return self._dirs

    @propertycache
    def _ignore(self):
        files = [self._join('.hgignore')]
        for name, path in self._ui.configitems("ui"):
            if name == 'ignore' or name.startswith('ignore.'):
                files.append(util.expandpath(path))
        return ignore.ignore(self._root, files, self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool('ui', 'slash') and os.sep != '/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return util.checkexec(self._root)

    @propertycache
    def _checkcase(self):
        return not util.checkcase(self._join('.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:
            def f(x):
                p = self._join(x)
                if os.path.islink(p):
                    return 'l'
                if util.isexec(p):
                    return 'x'
                return ''
            return f

        fallback = buildfallback()
        if self._checklink:
            def f(x):
                if os.path.islink(self._join(x)):
                    return 'l'
                if 'x' in fallback(x):
                    return 'x'
                return ''
            return f
        if self._checkexec:
            def f(x):
                if 'l' in fallback(x):
                    return 'l'
                if util.isexec(self._join(x)):
                    return 'x'
                return ''
            return f
        else:
            return fallback

    def getcwd(self):
        cwd = os.getcwd()
        if cwd == self._root:
            return ''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += os.sep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep):]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.normpath(path)
        return path

    def __getitem__(self, key):
        '''Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked
        '''
        return self._map.get(key, ("?",))[0]

    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        for x in sorted(self._map):
            yield x

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=nullid):
        self._dirty = self._dirtypl = True
        self._pl = p1, p2

    def setbranch(self, branch):
        if branch in ['tip', '.', 'null']:
            raise util.Abort(_('the name \'%s\' is reserved') % branch)
        self._branch = encoding.fromlocal(branch)
        self._opener.write("branch", self._branch + '\n')

    def _read(self):
        self._map = {}
        self._copymap = {}
        try:
            st = self._opener.read("dirstate")
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            return
        if not st:
            return

        p = parsers.parse_dirstate(self._map, self._copymap, st)
        if not self._dirtypl:
            self._pl = p

    def invalidate(self):
        for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
                  "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._copymap[dest] = source
        elif dest in self._copymap:
            del self._copymap[dest]

    def copied(self, file):
        return self._copymap.get(file, None)

    def copies(self):
        return self._copymap

    def _droppath(self, f):
        if self[f] not in "?r" and "_dirs" in self.__dict__:
            _decdirs(self._dirs, f)

    def _addpath(self, f, check=False):
        oldstate = self[f]
        if check or oldstate == "r":
            scmutil.checkfilename(f)
            if f in self._dirs:
                raise util.Abort(_('directory %r already in dirstate') % f)
            # shadows
            for d in _finddirs(f):
                if d in self._dirs:
                    break
                if d in self._map and self[d] != 'r':
                    raise util.Abort(
                        _('file %r in dirstate clashes with %r') % (d, f))
        if oldstate in "?r" and "_dirs" in self.__dict__:
            _incdirs(self._dirs, f)

    def normal(self, f):
        '''Mark a file normal and clean.'''
        self._dirty = True
        self._addpath(f)
        s = os.lstat(self._join(f))
        mtime = int(s.st_mtime)
        self._map[f] = ('n', s.st_mode, s.st_size, mtime)
        if f in self._copymap:
            del self._copymap[f]
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self._pl[1] != nullid and f in self._map:
            # if there is a merge going on and the file was either
            # in state 'm' (-1) or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map[f]
            if entry[0] == 'r' and entry[2] in (-1, -2):
                source = self._copymap.get(f)
                if entry[2] == -1:
                    self.merge(f)
                elif entry[2] == -2:
                    self.otherparent(f)
                if source:
                    self.copy(source, f)
                return
            if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
                return
        self._dirty = True
        self._addpath(f)
        self._map[f] = ('n', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def otherparent(self, f):
        '''Mark as coming from the other parent, always dirty.'''
        if self._pl[1] == nullid:
            raise util.Abort(_("setting %r to other parent "
                               "only allowed in merges") % f)
        self._dirty = True
        self._addpath(f)
        self._map[f] = ('n', 0, -2, -1)
        if f in self._copymap:
            del self._copymap[f]

    def add(self, f):
        '''Mark a file added.'''
        self._dirty = True
        self._addpath(f, True)
        self._map[f] = ('a', 0, -1, -1)
        if f in self._copymap:
            del self._copymap[f]

    def remove(self, f):
        '''Mark a file removed.'''
        self._dirty = True
        self._droppath(f)
        size = 0
        if self._pl[1] != nullid and f in self._map:
            # backup the previous state
            entry = self._map[f]
            if entry[0] == 'm': # merge
                size = -1
            elif entry[0] == 'n' and entry[2] == -2: # other parent
                size = -2
        self._map[f] = ('r', 0, size, 0)
        if size == 0 and f in self._copymap:
            del self._copymap[f]

    def merge(self, f):
        '''Mark a file merged.'''
        self._dirty = True
        s = os.lstat(self._join(f))
        self._addpath(f)
        self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
        if f in self._copymap:
            del self._copymap[f]

    def drop(self, f):
        '''Drop a file from the dirstate'''
        if f in self._map:
            self._dirty = True
            self._droppath(f)
            del self._map[f]

    def _normalize(self, path, isknown):
        normed = util.normcase(path)
        folded = self._foldmap.get(normed, None)
        if folded is None:
            if isknown or not os.path.lexists(os.path.join(self._root, path)):
                folded = path
            else:
                folded = self._foldmap.setdefault(normed,
                                util.fspath(normed, self._normroot))
        return folded

    def normalize(self, path, isknown=False):
        '''
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        '''

        if self._checkcase:
            return self._normalize(path, isknown)
        return path

    def clear(self):
        self._map = {}
        if "_dirs" in self.__dict__:
            delattr(self, "_dirs")
        self._copymap = {}
        self._pl = [nullid, nullid]
        self._lastnormaltime = 0
        self._dirty = True

    def rebuild(self, parent, files):
        self.clear()
        for f in files:
            if 'x' in files.flags(f):
                self._map[f] = ('n', 0777, -1, 0)
            else:
                self._map[f] = ('n', 0666, -1, 0)
        self._pl = (parent, nullid)
        self._dirty = True

    def write(self):
        if not self._dirty:
            return
        st = self._opener("dirstate", "w", atomictemp=True)

        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = int(util.fstat(st).st_mtime)

        cs = cStringIO.StringIO()
        copymap = self._copymap
        pack = struct.pack
        write = cs.write
        write("".join(self._pl))
        for f, e in self._map.iteritems():
            if e[0] == 'n' and e[3] == now:
                # The file was last modified "simultaneously" with the current
                # write to dirstate (i.e. within the same second for file-
                # systems with a granularity of 1 sec). This commonly happens
                # for at least a couple of files on 'update'.
                # The user could change the file without changing its size
                # within the same second. Invalidate the file's stat data in
                # dirstate, forcing future 'status' calls to compare the
                # contents of the file. This prevents mistakenly treating such
                # files as clean.
                e = (e[0], 0, -1, -1) # mark entry as 'unset'
                self._map[f] = e

            if f in copymap:
                f = "%s\0%s" % (f, copymap[f])
            e = pack(_format, e[0], e[1], e[2], e[3], len(f))
            write(e)
            write(f)
        st.write(cs.getvalue())
        st.close()
        self._lastnormaltime = 0
        self._dirty = self._dirtypl = False

    def _dirignore(self, f):
        if f == '.':
            return False
        if self._ignore(f):
            return True
        for p in _finddirs(f):
            if self._ignore(p):
                return True
        return False

    def walk(self, match, subrepos, unknown, ignored):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).
        '''

        def fwarn(f, msg):
            self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
            return False

        def badtype(mode):
            kind = _('unknown')
            if stat.S_ISCHR(mode):
                kind = _('character device')
            elif stat.S_ISBLK(mode):
                kind = _('block device')
            elif stat.S_ISFIFO(mode):
                kind = _('fifo')
            elif stat.S_ISSOCK(mode):
                kind = _('socket')
            elif stat.S_ISDIR(mode):
                kind = _('directory')
            return _('unsupported file type (type is %s)') % kind

        ignore = self._ignore
        dirignore = self._dirignore
        if ignored:
            ignore = util.never
            dirignore = util.never
        elif not unknown:
            # if unknown and ignored are False, skip step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        badfn = match.bad
        dmap = self._map
        normpath = util.normpath
        listdir = osutil.listdir
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        work = []
        wadd = work.append

        exact = skipstep3 = False
        if matchfn == match.exact: # match.exact
            exact = True
            dirignore = util.always # skip step 2
        elif match.files() and not match.anypats(): # match.match, no patterns
            skipstep3 = True

        if self._checkcase:
            normalize = self._normalize
            skipstep3 = False
        else:
            normalize = lambda x, y: x

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + "/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or '.' in files:
            files = ['']
        results = dict.fromkeys(subrepos)
        results['.hg'] = None

        # step 1: find all explicit files
        for ff in files:
            nf = normalize(normpath(ff), False)
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    skipstep3 = False
                    if nf in dmap:
                        #file deleted on disk but still in dirstate
                        results[nf] = None
                    match.dir(nf)
                    if not dirignore(nf):
                        wadd(nf)
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError, inst:
                if nf in dmap: # does it exactly match a file?
                    results[nf] = None
                else: # does it match a directory?
                    prefix = nf + "/"
                    for fn in dmap:
                        if fn.startswith(prefix):
                            match.dir(nf)
                            skipstep3 = False
                            break
                    else:
                        badfn(ff, inst.strerror)

        # step 2: visit subdirectories
        while work:
            nd = work.pop()
            skip = None
            if nd == '.':
                nd = ''
            else:
                skip = '.hg'
            try:
                entries = listdir(join(nd), stat=True, skip=skip)
            except OSError, inst:
                if inst.errno == errno.EACCES:
                    fwarn(nd, inst.strerror)
                    continue
                raise
            for f, kind, st in entries:
                nf = normalize(nd and (nd + "/" + f) or f, True)
                if nf not in results:
                    if kind == dirkind:
                        if not ignore(nf):
                            match.dir(nf)
                            wadd(nf)
                        if nf in dmap and matchfn(nf):
                            results[nf] = None
                    elif kind == regkind or kind == lnkkind:
                        if nf in dmap:
                            if matchfn(nf):
                                results[nf] = st
                        elif matchfn(nf) and not ignore(nf):
                            results[nf] = st
                    elif nf in dmap and matchfn(nf):
                        results[nf] = None

        # step 3: report unseen items in the dmap hash
        if not skipstep3 and not exact:
            visit = sorted([f for f in dmap if f not in results and matchfn(f)])
            for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
                if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
                    st = None
                results[nf] = st
        for s in subrepos:
            del results[s]
        del results['.hg']
        return results

    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a tuple of lists (unsure, modified, added,
        removed, deleted, unknown, ignored, clean), where:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          added:
            files that have been explicitly added with hg add
          removed:
            files that have been explicitly removed with hg remove
          deleted:
            files that have been deleted through other means ("missing")
          unknown:
            files not in the dirstate that are not ignored
          ignored:
            files not in the dirstate that are ignored
            (by _dirignore())
          clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        ladd = lookup.append # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append
        iadd = ignored.append
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append

        lnkkind = stat.S_IFLNK

        for fn, st in self.walk(match, subrepos, listunknown,
                                listignored).iteritems():
            if fn not in dmap:
                if (listignored or match.exact(fn)) and self._dirignore(fn):
                    if listignored:
                        iadd(fn)
                elif listunknown:
                    uadd(fn)
                continue

            state, mode, size, time = dmap[fn]

            if not st and state in "nma":
                dadd(fn)
            elif state == 'n':
                # The "mode & lnkkind != lnkkind or self._checklink"
                # lines are an expansion of "islink => checklink"
                # where islink means "is this a link?" and checklink
                # means "can we check links?".
                mtime = int(st.st_mtime)
                if (size >= 0 and
                    (size != st.st_size
                     or ((mode ^ st.st_mode) & 0100 and self._checkexec))
                    and (mode & lnkkind != lnkkind or self._checklink)
                    or size == -2 # other parent
                    or fn in self._copymap):
                    madd(fn)
                elif (mtime != time
                      and (mode & lnkkind != lnkkind or self._checklink)):
                    ladd(fn)
                elif mtime == self._lastnormaltime:
                    # fn may have been changed in the same timeslot without
                    # changing its size. This can happen if we quickly do
                    # multiple commits in a single transaction.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == 'm':
                madd(fn)
            elif state == 'a':
                aadd(fn)
            elif state == 'r':
                radd(fn)

        return (lookup, modified, added, removed, deleted, unknown, ignored,
                clean)
@@ -1,2321 +1,2324 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class storecache(filecache):
22 class storecache(filecache):
23 """filecache for files in the store"""
23 """filecache for files in the store"""
24 def join(self, obj, fname):
24 def join(self, obj, fname):
25 return obj.sjoin(fname)
25 return obj.sjoin(fname)
26
26
27 class localrepository(repo.repository):
27 class localrepository(repo.repository):
28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 'known', 'getbundle'))
29 'known', 'getbundle'))
30 supportedformats = set(('revlogv1', 'generaldelta'))
30 supportedformats = set(('revlogv1', 'generaldelta'))
31 supported = supportedformats | set(('store', 'fncache', 'shared',
31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 'dotencode'))
32 'dotencode'))
33
33
34 def __init__(self, baseui, path=None, create=False):
34 def __init__(self, baseui, path=None, create=False):
35 repo.repository.__init__(self)
35 repo.repository.__init__(self)
36 self.root = os.path.realpath(util.expandpath(path))
36 self.root = os.path.realpath(util.expandpath(path))
37 self.path = os.path.join(self.root, ".hg")
37 self.path = os.path.join(self.root, ".hg")
38 self.origroot = path
38 self.origroot = path
39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 self.opener = scmutil.opener(self.path)
40 self.opener = scmutil.opener(self.path)
41 self.wopener = scmutil.opener(self.root)
41 self.wopener = scmutil.opener(self.root)
42 self.baseui = baseui
42 self.baseui = baseui
43 self.ui = baseui.copy()
43 self.ui = baseui.copy()
44 self._dirtyphases = False
44 self._dirtyphases = False
45 # A list of callback to shape the phase if no data were found.
45 # A list of callback to shape the phase if no data were found.
46 # Callback are in the form: func(repo, roots) --> processed root.
46 # Callback are in the form: func(repo, roots) --> processed root.
47 # This list it to be filled by extension during repo setup
47 # This list it to be filled by extension during repo setup
48 self._phasedefaults = []
48 self._phasedefaults = []
49
49
50 try:
50 try:
51 self.ui.readconfig(self.join("hgrc"), self.root)
51 self.ui.readconfig(self.join("hgrc"), self.root)
52 extensions.loadall(self.ui)
52 extensions.loadall(self.ui)
53 except IOError:
53 except IOError:
54 pass
54 pass
55
55
56 if not os.path.isdir(self.path):
56 if not os.path.isdir(self.path):
57 if create:
57 if create:
58 if not os.path.exists(path):
58 if not os.path.exists(path):
59 util.makedirs(path)
59 util.makedirs(path)
60 util.makedir(self.path, notindexed=True)
60 util.makedir(self.path, notindexed=True)
61 requirements = ["revlogv1"]
61 requirements = ["revlogv1"]
62 if self.ui.configbool('format', 'usestore', True):
62 if self.ui.configbool('format', 'usestore', True):
63 os.mkdir(os.path.join(self.path, "store"))
63 os.mkdir(os.path.join(self.path, "store"))
64 requirements.append("store")
64 requirements.append("store")
65 if self.ui.configbool('format', 'usefncache', True):
65 if self.ui.configbool('format', 'usefncache', True):
66 requirements.append("fncache")
66 requirements.append("fncache")
67 if self.ui.configbool('format', 'dotencode', True):
67 if self.ui.configbool('format', 'dotencode', True):
68 requirements.append('dotencode')
68 requirements.append('dotencode')
69 # create an invalid changelog
69 # create an invalid changelog
70 self.opener.append(
70 self.opener.append(
71 "00changelog.i",
71 "00changelog.i",
72 '\0\0\0\2' # represents revlogv2
72 '\0\0\0\2' # represents revlogv2
73 ' dummy changelog to prevent using the old repo layout'
73 ' dummy changelog to prevent using the old repo layout'
74 )
74 )
75 if self.ui.configbool('format', 'generaldelta', False):
75 if self.ui.configbool('format', 'generaldelta', False):
76 requirements.append("generaldelta")
76 requirements.append("generaldelta")
77 requirements = set(requirements)
77 requirements = set(requirements)
78 else:
78 else:
79 raise error.RepoError(_("repository %s not found") % path)
79 raise error.RepoError(_("repository %s not found") % path)
80 elif create:
80 elif create:
81 raise error.RepoError(_("repository %s already exists") % path)
81 raise error.RepoError(_("repository %s already exists") % path)
82 else:
82 else:
83 try:
83 try:
84 requirements = scmutil.readrequires(self.opener, self.supported)
84 requirements = scmutil.readrequires(self.opener, self.supported)
85 except IOError, inst:
85 except IOError, inst:
86 if inst.errno != errno.ENOENT:
86 if inst.errno != errno.ENOENT:
87 raise
87 raise
88 requirements = set()
88 requirements = set()
89
89
90 self.sharedpath = self.path
90 self.sharedpath = self.path
91 try:
91 try:
92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
93 if not os.path.exists(s):
93 if not os.path.exists(s):
94 raise error.RepoError(
94 raise error.RepoError(
95 _('.hg/sharedpath points to nonexistent directory %s') % s)
95 _('.hg/sharedpath points to nonexistent directory %s') % s)
96 self.sharedpath = s
96 self.sharedpath = s
97 except IOError, inst:
97 except IOError, inst:
98 if inst.errno != errno.ENOENT:
98 if inst.errno != errno.ENOENT:
99 raise
99 raise
100
100
101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
102 self.spath = self.store.path
102 self.spath = self.store.path
103 self.sopener = self.store.opener
103 self.sopener = self.store.opener
104 self.sjoin = self.store.join
104 self.sjoin = self.store.join
105 self.opener.createmode = self.store.createmode
105 self.opener.createmode = self.store.createmode
106 self._applyrequirements(requirements)
106 self._applyrequirements(requirements)
107 if create:
107 if create:
108 self._writerequirements()
108 self._writerequirements()
109
109
110
110
111 self._branchcache = None
111 self._branchcache = None
112 self._branchcachetip = None
112 self._branchcachetip = None
113 self.filterpats = {}
113 self.filterpats = {}
114 self._datafilters = {}
114 self._datafilters = {}
115 self._transref = self._lockref = self._wlockref = None
115 self._transref = self._lockref = self._wlockref = None
116
116
117 # A cache for various files under .hg/ that tracks file changes,
117 # A cache for various files under .hg/ that tracks file changes,
118 # (used by the filecache decorator)
118 # (used by the filecache decorator)
119 #
119 #
120 # Maps a property name to its util.filecacheentry
120 # Maps a property name to its util.filecacheentry
121 self._filecache = {}
121 self._filecache = {}
122
122
123 def _applyrequirements(self, requirements):
123 def _applyrequirements(self, requirements):
124 self.requirements = requirements
124 self.requirements = requirements
125 openerreqs = set(('revlogv1', 'generaldelta'))
125 openerreqs = set(('revlogv1', 'generaldelta'))
126 self.sopener.options = dict((r, 1) for r in requirements
126 self.sopener.options = dict((r, 1) for r in requirements
127 if r in openerreqs)
127 if r in openerreqs)
128
128
129 def _writerequirements(self):
129 def _writerequirements(self):
130 reqfile = self.opener("requires", "w")
130 reqfile = self.opener("requires", "w")
131 for r in self.requirements:
131 for r in self.requirements:
132 reqfile.write("%s\n" % r)
132 reqfile.write("%s\n" % r)
133 reqfile.close()
133 reqfile.close()
134
134
135 def _checknested(self, path):
135 def _checknested(self, path):
136 """Determine if path is a legal nested repository."""
136 """Determine if path is a legal nested repository."""
137 if not path.startswith(self.root):
137 if not path.startswith(self.root):
138 return False
138 return False
139 subpath = path[len(self.root) + 1:]
139 subpath = path[len(self.root) + 1:]
140 normsubpath = util.pconvert(subpath)
140 normsubpath = util.pconvert(subpath)
141
141
142 # XXX: Checking against the current working copy is wrong in
142 # XXX: Checking against the current working copy is wrong in
143 # the sense that it can reject things like
143 # the sense that it can reject things like
144 #
144 #
145 # $ hg cat -r 10 sub/x.txt
145 # $ hg cat -r 10 sub/x.txt
146 #
146 #
147 # if sub/ is no longer a subrepository in the working copy
147 # if sub/ is no longer a subrepository in the working copy
148 # parent revision.
148 # parent revision.
149 #
149 #
150 # However, it can of course also allow things that would have
150 # However, it can of course also allow things that would have
151 # been rejected before, such as the above cat command if sub/
151 # been rejected before, such as the above cat command if sub/
152 # is a subrepository now, but was a normal directory before.
152 # is a subrepository now, but was a normal directory before.
153 # The old path auditor would have rejected by mistake since it
153 # The old path auditor would have rejected by mistake since it
154 # panics when it sees sub/.hg/.
154 # panics when it sees sub/.hg/.
155 #
155 #
156 # All in all, checking against the working copy seems sensible
156 # All in all, checking against the working copy seems sensible
157 # since we want to prevent access to nested repositories on
157 # since we want to prevent access to nested repositories on
158 # the filesystem *now*.
158 # the filesystem *now*.
159 ctx = self[None]
159 ctx = self[None]
160 parts = util.splitpath(subpath)
160 parts = util.splitpath(subpath)
161 while parts:
161 while parts:
162 prefix = '/'.join(parts)
162 prefix = '/'.join(parts)
163 if prefix in ctx.substate:
163 if prefix in ctx.substate:
164 if prefix == normsubpath:
164 if prefix == normsubpath:
165 return True
165 return True
166 else:
166 else:
167 sub = ctx.sub(prefix)
167 sub = ctx.sub(prefix)
168 return sub.checknested(subpath[len(prefix) + 1:])
168 return sub.checknested(subpath[len(prefix) + 1:])
169 else:
169 else:
170 parts.pop()
170 parts.pop()
171 return False
171 return False
172
172
173 @filecache('bookmarks')
173 @filecache('bookmarks')
174 def _bookmarks(self):
174 def _bookmarks(self):
175 return bookmarks.read(self)
175 return bookmarks.read(self)
176
176
177 @filecache('bookmarks.current')
177 @filecache('bookmarks.current')
178 def _bookmarkcurrent(self):
178 def _bookmarkcurrent(self):
179 return bookmarks.readcurrent(self)
179 return bookmarks.readcurrent(self)
180
180
181 def _writebookmarks(self, marks):
181 def _writebookmarks(self, marks):
182 bookmarks.write(self)
182 bookmarks.write(self)
183
183
184 @storecache('phaseroots')
184 @storecache('phaseroots')
185 def _phaseroots(self):
185 def _phaseroots(self):
186 self._dirtyphases = False
186 self._dirtyphases = False
187 phaseroots = phases.readroots(self)
187 phaseroots = phases.readroots(self)
188 phases.filterunknown(self, phaseroots)
188 phases.filterunknown(self, phaseroots)
189 return phaseroots
189 return phaseroots
190
190
191 @propertycache
191 @propertycache
192 def _phaserev(self):
192 def _phaserev(self):
193 cache = [phases.public] * len(self)
193 cache = [phases.public] * len(self)
194 for phase in phases.trackedphases:
194 for phase in phases.trackedphases:
195 roots = map(self.changelog.rev, self._phaseroots[phase])
195 roots = map(self.changelog.rev, self._phaseroots[phase])
196 if roots:
196 if roots:
197 for rev in roots:
197 for rev in roots:
198 cache[rev] = phase
198 cache[rev] = phase
199 for rev in self.changelog.descendants(*roots):
199 for rev in self.changelog.descendants(*roots):
200 cache[rev] = phase
200 cache[rev] = phase
201 return cache
201 return cache
202
202
203 @storecache('00changelog.i')
203 @storecache('00changelog.i')
204 def changelog(self):
204 def changelog(self):
205 c = changelog.changelog(self.sopener)
205 c = changelog.changelog(self.sopener)
206 if 'HG_PENDING' in os.environ:
206 if 'HG_PENDING' in os.environ:
207 p = os.environ['HG_PENDING']
207 p = os.environ['HG_PENDING']
208 if p.startswith(self.root):
208 if p.startswith(self.root):
209 c.readpending('00changelog.i.a')
209 c.readpending('00changelog.i.a')
210 return c
210 return c
211
211
212 @storecache('00manifest.i')
212 @storecache('00manifest.i')
213 def manifest(self):
213 def manifest(self):
214 return manifest.manifest(self.sopener)
214 return manifest.manifest(self.sopener)
215
215
216 @filecache('dirstate')
216 @filecache('dirstate')
217 def dirstate(self):
217 def dirstate(self):
218 warned = [0]
218 warned = [0]
219 def validate(node):
219 def validate(node):
220 try:
220 try:
221 self.changelog.rev(node)
221 self.changelog.rev(node)
222 return node
222 return node
223 except error.LookupError:
223 except error.LookupError:
224 if not warned[0]:
224 if not warned[0]:
225 warned[0] = True
225 warned[0] = True
226 self.ui.warn(_("warning: ignoring unknown"
226 self.ui.warn(_("warning: ignoring unknown"
227 " working parent %s!\n") % short(node))
227 " working parent %s!\n") % short(node))
228 return nullid
228 return nullid
229
229
230 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
230 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231
231
232 def __getitem__(self, changeid):
232 def __getitem__(self, changeid):
233 if changeid is None:
233 if changeid is None:
234 return context.workingctx(self)
234 return context.workingctx(self)
235 return context.changectx(self, changeid)
235 return context.changectx(self, changeid)
236
236
237 def __contains__(self, changeid):
237 def __contains__(self, changeid):
238 try:
238 try:
239 return bool(self.lookup(changeid))
239 return bool(self.lookup(changeid))
240 except error.RepoLookupError:
240 except error.RepoLookupError:
241 return False
241 return False
242
242
243 def __nonzero__(self):
243 def __nonzero__(self):
244 return True
244 return True
245
245
246 def __len__(self):
246 def __len__(self):
247 return len(self.changelog)
247 return len(self.changelog)
248
248
249 def __iter__(self):
249 def __iter__(self):
250 for i in xrange(len(self)):
250 for i in xrange(len(self)):
251 yield i
251 yield i
252
252
253 def revs(self, expr, *args):
253 def revs(self, expr, *args):
254 '''Return a list of revisions matching the given revset'''
254 '''Return a list of revisions matching the given revset'''
255 expr = revset.formatspec(expr, *args)
255 expr = revset.formatspec(expr, *args)
256 m = revset.match(None, expr)
256 m = revset.match(None, expr)
257 return [r for r in m(self, range(len(self)))]
257 return [r for r in m(self, range(len(self)))]
258
258
259 def set(self, expr, *args):
259 def set(self, expr, *args):
260 '''
260 '''
261 Yield a context for each matching revision, after doing arg
261 Yield a context for each matching revision, after doing arg
262 replacement via revset.formatspec
262 replacement via revset.formatspec
263 '''
263 '''
264 for r in self.revs(expr, *args):
264 for r in self.revs(expr, *args):
265 yield self[r]
265 yield self[r]
266
266
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be
        a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

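    # Editorial note (sketch, not part of the changeset): the two storage
    # paths differ in visibility. A hypothetical session:
    #
    #   repo.tag('v1.0', node, 'Added tag v1.0', False, user, None)
    #       # appends to .hgtags and commits -> the tag travels on push
    #   repo.tag('wip', node, '', True, user, None)
    #       # appends to .hg/localtags -> the tag stays private to this clone
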
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

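    # Editorial sketch (not in the original file): tags() maps name -> node,
    # and nodetagscache is simply its inverse, built lazily on first use:
    #
    #   repo.tags()          # {'tip': <node>, 'v1.0': <node>, ...}
    #   repo.nodetags(node)  # sorted list of names that point at this node
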
    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

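    # Editorial note (sketch): branchmap() lists every head of each branch,
    # oldest first, while branchtags() reduces that to one node per branch,
    # preferring the tipmost head that is not marked closed (no 'close' key
    # in its changeset extra). Hypothetical shapes:
    #
    #   repo.branchmap()   # {'default': [n1, n2], 'stable': [n3]}
    #   repo.branchtags()  # {'default': n2, 'stable': n3}
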
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

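    # Editorial sketch of the cache file these two methods agree on
    # (.hg/cache/branchheads); the hashes below are made up:
    #
    #   <tip-hex> <tip-rev>        e.g. "1f0dee641bb7... 42"
    #   <head-hex> <branch-name>   one line per branch head
    #   <head-hex> <branch-name>
    #
    # The first line lets _readbranchcache() detect a stale cache: if the
    # recorded tip no longer matches the repository, the cache is discarded.
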
    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

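    # Editorial note (sketch): lookup() tries symbol types in a fixed order,
    # which also defines what wins on a name clash:
    #
    #   integer rev -> '.' / 'null' / 'tip' -> exact node match
    #   -> bookmark -> tag -> branch -> unambiguous hex prefix
    #
    # e.g. repo.lookup('tip') and repo.lookup(0) both return a binary node.
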
    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or self._phaserev[r] >= phases.secret)
            result.append(resp)
        return result

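    # Editorial note (sketch): known() answers the discovery question "which
    # of these nodes does this repository have?". It deliberately reports
    # secret-phase changesets as unknown, so they are never exchanged with a
    # peer during push/pull discovery.
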
    def local(self):
        return self

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

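    # Editorial sketch (the configuration below is an assumption, not taken
    # from this file): the filter pipeline is driven by [encode] / [decode]
    # sections in hgrc, mapping file patterns to commands or registered data
    # filters, along the lines of:
    #
    #   [encode]
    #   **.txt = dos2unix    # normalize on the way into the repository
    #   [decode]
    #   **.txt = unix2dos    # and back on the way out
    #
    # wread() applies the encode filters, wwrite() the decode filters.
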
    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

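    # Editorial sketch of the calling convention (names assumed, not from
    # this file): callers take the store lock, open a transaction, and only
    # close it on success; an unclosed transaction is rolled back:
    #
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...           # append to revlogs through the transaction
    #           tr.close()    # commit the journal
    #       finally:
    #           tr.release()  # rolls back if close() was never reached
    #   finally:
    #       lock.release()
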
    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

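    # Editorial note (sketch): aftertrans(), via the renames computed in
    # transaction() above, moves each journal.* file to its undo.*
    # counterpart once the transaction closes successfully; those undo.*
    # files are exactly what _rollback() below restores from.
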
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')
        delcache('_phaserev')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly reread the dirstate (i.e. to restore it to a previous
        known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

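    # Editorial note on the hunk above (the point of this changeset): the
    # dirstate now keeps a _filecache of its own, so before the repo drops
    # its cached 'dirstate' attribute it also drops every property the
    # dirstate registered there. Otherwise the next access could hand back
    # a dirstate rebuilt around stale cached state.
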
    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
                self._dirtyphases = False
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

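    # Editorial sketch (names assumed): code that touches both the working
    # directory and the store takes wlock before lock, mirroring commit():
    #
    #   wlock = repo.wlock()
    #   try:
    #       lock = repo.lock()
    #       try:
    #           ...  # modify store and dirstate
    #       finally:
    #           lock.release()
    #   finally:
    #       wlock.release()
    #
    # The unlock callbacks above refresh the repo's _filecache entries so a
    # later access does not mistake our own writes for external changes.
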
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

1069 def commit(self, text="", user=None, date=None, match=None, force=False,
1072 def commit(self, text="", user=None, date=None, match=None, force=False,
1070 editor=False, extra={}):
1073 editor=False, extra={}):
1071 """Add a new revision to current repository.
1074 """Add a new revision to current repository.
1072
1075
1073 Revision information is gathered from the working directory,
1076 Revision information is gathered from the working directory,
1074 match can be used to filter the committed files. If editor is
1077 match can be used to filter the committed files. If editor is
1075 supplied, it is called to get a commit message.
1078 supplied, it is called to get a commit message.
1076 """
1079 """
1077
1080
1078 def fail(f, msg):
1081 def fail(f, msg):
1079 raise util.Abort('%s: %s' % (f, msg))
1082 raise util.Abort('%s: %s' % (f, msg))
1080
1083
1081 if not match:
1084 if not match:
1082 match = matchmod.always(self.root, '')
1085 match = matchmod.always(self.root, '')
1083
1086
1084 if not force:
1087 if not force:
1085 vdirs = []
1088 vdirs = []
1086 match.dir = vdirs.append
1089 match.dir = vdirs.append
1087 match.bad = fail
1090 match.bad = fail
1088
1091
1089 wlock = self.wlock()
1092 wlock = self.wlock()
1090 try:
1093 try:
1091 wctx = self[None]
1094 wctx = self[None]
1092 merge = len(wctx.parents()) > 1
1095 merge = len(wctx.parents()) > 1
1093
1096
1094 if (not force and merge and match and
1097 if (not force and merge and match and
1095 (match.files() or match.anypats())):
1098 (match.files() or match.anypats())):
1096 raise util.Abort(_('cannot partially commit a merge '
1099 raise util.Abort(_('cannot partially commit a merge '
1097 '(do not specify files or patterns)'))
1100 '(do not specify files or patterns)'))
1098
1101
1099 changes = self.status(match=match, clean=force)
1102 changes = self.status(match=match, clean=force)
1100 if force:
1103 if force:
1101 changes[0].extend(changes[6]) # mq may commit unchanged files
1104 changes[0].extend(changes[6]) # mq may commit unchanged files
1102
1105
1103 # check subrepos
1106 # check subrepos
1104 subs = []
1107 subs = []
1105 removedsubs = set()
1108 removedsubs = set()
1106 if '.hgsub' in wctx:
1109 if '.hgsub' in wctx:
1107 # only manage subrepos and .hgsubstate if .hgsub is present
1110 # only manage subrepos and .hgsubstate if .hgsub is present
1108 for p in wctx.parents():
1111 for p in wctx.parents():
1109 removedsubs.update(s for s in p.substate if match(s))
1112 removedsubs.update(s for s in p.substate if match(s))
1110 for s in wctx.substate:
1113 for s in wctx.substate:
1111 removedsubs.discard(s)
1114 removedsubs.discard(s)
1112 if match(s) and wctx.sub(s).dirty():
1115 if match(s) and wctx.sub(s).dirty():
1113 subs.append(s)
1116 subs.append(s)
1114 if (subs or removedsubs):
1117 if (subs or removedsubs):
1115 if (not match('.hgsub') and
1118 if (not match('.hgsub') and
1116 '.hgsub' in (wctx.modified() + wctx.added())):
1119 '.hgsub' in (wctx.modified() + wctx.added())):
1117 raise util.Abort(
1120 raise util.Abort(
1118 _("can't commit subrepos without .hgsub"))
1121 _("can't commit subrepos without .hgsub"))
1119 if '.hgsubstate' not in changes[0]:
1122 if '.hgsubstate' not in changes[0]:
1120 changes[0].insert(0, '.hgsubstate')
1123 changes[0].insert(0, '.hgsubstate')
1121 if '.hgsubstate' in changes[2]:
1124 if '.hgsubstate' in changes[2]:
1122 changes[2].remove('.hgsubstate')
1125 changes[2].remove('.hgsubstate')
1123 elif '.hgsub' in changes[2]:
1126 elif '.hgsub' in changes[2]:
1124 # clean up .hgsubstate when .hgsub is removed
1127 # clean up .hgsubstate when .hgsub is removed
1125 if ('.hgsubstate' in wctx and
1128 if ('.hgsubstate' in wctx and
1126 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1129 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1127 changes[2].insert(0, '.hgsubstate')
1130 changes[2].insert(0, '.hgsubstate')
1128
1131
1129 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1132 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1130 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1133 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1131 if changedsubs:
1134 if changedsubs:
1132 raise util.Abort(_("uncommitted changes in subrepo %s")
1135 raise util.Abort(_("uncommitted changes in subrepo %s")
                                 % changedsubs[0],
                                 hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
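
# Aside: a minimal standalone sketch (not part of localrepo.py) of the
# for/else idiom used in the explicit-pattern check above: the else
# branch of a for loop runs only when the loop finished without
# breaking. The names below are hypothetical.
def matches_directory(pattern, matched):
    """Return True if any matched file lives under pattern/."""
    d = pattern + '/'
    for mf in matched:
        if mf.startswith(d):
            break
    else:
        return False   # loop never broke: nothing under the directory
    return True

assert matches_directory('docs', ['docs/index.txt', 'setup.py'])
assert not matches_directory('src', ['docs/index.txt'])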

    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
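
# Aside: a hedged sketch of why commitctx hands the revlogs
# weakref.proxy(tr) instead of the transaction itself: the proxy is
# used exactly like the real object, but it does not keep the
# transaction alive once release() drops the last strong reference, so
# long-lived callers cannot pin a finished transaction. Txn is a toy class.
import weakref

class Txn(object):
    def add(self, name):
        print 'journaling %s' % name

tr = Txn()
trp = weakref.proxy(tr)
trp.add('00changelog.i')   # behaves like tr.add(...)
del tr                     # proxy now dangles; touching it raises ReferenceError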

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()
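
# Aside: a minimal model (an assumption-laden sketch, not Mercurial's
# actual filecache) of "discard all cache entries to force reloading":
# a descriptor caches computed values in a per-instance dict, and
# clearing that dict forces the next access to recompute.
class cached(object):
    def __init__(self, func):
        self.func = func
    def __get__(self, obj, type=None):
        if self.func.__name__ not in obj._cache:
            obj._cache[self.func.__name__] = self.func(obj)
        return obj._cache[self.func.__name__]

class Repo(object):
    def __init__(self):
        self._cache = {}
    @cached
    def tags(self):
        print 'recomputing tags'
        return {'tip': 0}

r = Repo()
r.tags            # recomputes
r.tags            # served from the cache
r._cache.clear()  # analogous to self._filecache.clear() above
r.tags            # recomputes again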

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or a node and the
        working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with the working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
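
# Aside: a simplified standalone model of the two-revision branch of
# status() above: given two {filename: nodeid} manifests, classify
# files as modified, added, removed, or clean. Flag handling and
# content comparison are deliberately elided.
def diffmanifests(mf1, mf2):
    mf1 = dict(mf1)   # we consume mf1, as status() does
    modified, added, clean = [], [], []
    for fn in mf2:
        if fn in mf1:
            if mf1[fn] != mf2[fn]:
                modified.append(fn)
            else:
                clean.append(fn)
            del mf1[fn]
        else:
            added.append(fn)
    removed = mf1.keys()   # whatever is left exists only in mf1
    return modified, added, removed, clean

print diffmanifests({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 9, 'd': 4})
# -> (['b'], ['d'], ['c'], ['a']), up to dict iteration order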

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
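
# Aside: between() samples nodes along the first-parent chain at
# exponentially growing distances (1, 2, 4, 8, ...) below each top,
# which lets wire-protocol discovery binary-search a range. A
# standalone model over a linear chain of integers where the parent of
# n is n - 1:
def sample(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        p = n - 1          # stand-in for changelog.parents(n)[0]
        if i == f:
            l.append(n)
            f = f * 2
        n = p
        i += 1
    return l

print sample(16, 0)   # [15, 14, 12, 8]: 1, 2, 4 and 8 steps below the top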

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result
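
# Aside: a toy model (deliberately simplified; the real
# phases.advanceboundary also walks ancestors) of the phase bookkeeping
# after the pull above. Advancing a boundary only ever lowers a node's
# phase, so public reports from the remote win over draft:
PUBLIC, DRAFT, SECRET = 0, 1, 2

def advanceboundary(phase, nodes, phasemap):
    for n in nodes:
        if phasemap.get(n, SECRET) > phase:
            phasemap[n] = phase

phasemap = {}
subset = ['a', 'b', 'c']            # everything we synchronized on
remote_public_heads = ['a']         # what a non-publishing remote calls public
advanceboundary(PUBLIC, remote_public_heads, phasemap)
advanceboundary(DRAFT, subset, phasemap)
print phasemap   # 'a' becomes public; 'b' and 'c' stay draft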

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # all-out push failed; synchronize all common changesets
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads) )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * the part of missingheads that is common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public here.
                    # XXX Beware that the revset breaks if droots are not
                    # XXX strictly roots; we may want to ensure they are,
                    # XXX but that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
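
# Aside: a standalone model (toy data, hypothetical names) of how the
# failed-partial-push branch above assembles cheads, i.e.
# heads(::missingheads and ::commonheads), from sets the push already
# computed:
common = set(['a', 'b', 'c'])        # ::commonheads
commonheads = set(['b', 'c'])
missing_root_parents = set(['c'])    # parents(roots(missing))
revs = ['x']                         # heads we tried, and failed, to push

cheads = [n for n in revs if n in common]           # pushed revs already common
cheads.extend(commonheads & missing_root_parents)   # commonheads feeding missing
print cheads   # ['c']: still worth synchronizing phases on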

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)
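
# Aside: "nodes between bases and heads" means descendants of a base
# that are also ancestors of a head, bases included. A toy model over a
# DAG given as {node: [parents]}, ignoring nodesbetween's ordering and
# efficiency:
def between_nodes(dag, bases, heads):
    def ancestors(ns):
        seen, stack = set(), list(ns)
        while stack:
            n = stack.pop()
            if n not in seen:
                seen.add(n)
                stack.extend(dag[n])
        return seen
    # ancestors of heads whose own ancestor set touches a base
    return [n for n in ancestors(heads)
            if ancestors([n]) & set(bases)]

dag = {'root': [], 'a': ['root'], 'b': ['a'], 'c': ['b']}
print between_nodes(dag, ['a'], ['c'])   # 'a', 'b', 'c' in some order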

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
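
# Aside: gengroup() above streams three revlog groups (changelog,
# manifest, filelogs) as one flat sequence of chunks, and
# util.chunkbuffer exposes that stream through a file-like read(). A
# minimal model of the shape, with a toy, eagerly-joining buffer in
# place of the real lazy one:
def gengroup():
    for part in ('changelog', 'manifest', 'filelog'):
        yield '%s-chunk|' % part
    yield 'close'

class chunkbuffer(object):
    def __init__(self, gen):
        self._data = ''.join(gen)   # the real chunkbuffer consumes lazily
        self._pos = 0
    def read(self, n):
        data = self._data[self._pos:self._pos + n]
        self._pos += n
        return data

buf = chunkbuffer(gengroup())
print buf.read(16)   # 'changelog-chunk|'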

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
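
# Aside: both prune() in the slow path and gennodelst() here apply the
# same filter: keep only revlog entries whose linkrev, the revision of
# the changeset that introduced them, falls inside the set being
# bundled. A toy model with a revlog as (node, linkrev) pairs:
def gennodelst(log, revset):
    return [node for node, linkrev in log if linkrev in revset]

filelog = [('n0', 0), ('n1', 2), ('n2', 5)]
print gennodelst(filelog, set([2, 5]))   # ['n1', 'n2']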
1999
2002
2000 def addchangegroup(self, source, srctype, url, emptyok=False):
2003 def addchangegroup(self, source, srctype, url, emptyok=False):
2001 """Add the changegroup returned by source.read() to this repo.
2004 """Add the changegroup returned by source.read() to this repo.
2002 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2005 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2003 the URL of the repo where this changegroup is coming from.
2006 the URL of the repo where this changegroup is coming from.
2004
2007
2005 Return an integer summarizing the change to this repo:
2008 Return an integer summarizing the change to this repo:
2006 - nothing changed or no source: 0
2009 - nothing changed or no source: 0
2007 - more heads than before: 1+added heads (2..n)
2010 - more heads than before: 1+added heads (2..n)
2008 - fewer heads than before: -1-removed heads (-2..-n)
2011 - fewer heads than before: -1-removed heads (-2..-n)
2009 - number of heads stays the same: 1
2012 - number of heads stays the same: 1
2010 """
2013 """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh
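            # Worked example with illustrative numbers: pulling two new
            # branch heads onto a repo whose single head is their ancestor
            # gives len(oldheads) = 1, len(heads) = 3, so dh = 2 and the
            # status line below gains " (+2 heads)".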

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret.
                #
                # We should not use `added` here but the list of all
                # changes in the bundle.
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)
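            # Illustration (hedged): a publishing push makes the pushed
            # csets public; a non-publishing push leaves them draft; a
            # pull or unbundle retracts the added csets to draft here,
            # while 'strip' leaves phase boundaries untouched.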

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
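
    # A hedged sketch of how a caller might interpret the integer
    # returned above (the method is addchangegroup in this version;
    # `repo`, `gen` and `url` are hypothetical names):
    #
    #     ret = repo.addchangegroup(gen, 'pull', url)
    #     if ret == 0:
    #         pass                    # nothing changed, or no source
    #     elif ret > 1:
    #         added = ret - 1         # gained that many heads
    #     elif ret < -1:
    #         removed = -1 - ret      # lost that many heads
    #     else:                       # ret == 1
    #         pass                    # head count unchanged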

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
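            # The stream_out payload parsed here looks roughly like this
            # (hypothetical names and sizes):
            #     "0\n"                     -- response code: OK
            #     "2 8192\n"                -- total files, total bytes
            #     "data/foo.i\x004096\n"    -- per file: name NUL size
            #     <4096 raw bytes>          -- followed by the file data
            #     "data/bar.i\x004096\n"
            #     <4096 raw bytes>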
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()
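            # Worked example of the merge above, with hypothetical sets:
            # if self.requirements = set(['revlogv1', 'store', 'dotencode']),
            # self.supportedformats = set(['revlogv1', 'generaldelta']) and
            # the remote streamed plain revlogv1, then requirements ends up
            # set(['revlogv1', 'store', 'dotencode']): non-format local
            # requirements survive, format requirements track the remote.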

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
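
    # Sketch of the negotiation above (capability values hypothetical):
    #     remote.capable('stream')     -> True on servers serving only
    #                                     revlogv1 repos
    #     remote.capable('streamreqs') -> e.g. 'revlogv1,generaldelta'
    # When every advertised format is in self.supportedformats the clone
    # streams raw store files; otherwise it falls back to a normal pull.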

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
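
    # Hedged usage sketch: bookmarks are one consumer of this interface,
    # roughly:
    #     repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
    # where an empty `old` means the key is expected to be absent; the
    # 'prepushkey' and 'pushkey' hooks fire around the update.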

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
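
    # And the read side (hypothetical output):
    #     repo.listkeys('bookmarks') -> {'feature-x': '0123abcd...'}
    # Valid namespaces are whatever the pushkey module has registered.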

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
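
# For instance, a transaction might register
#     aftertrans([('journal', 'undo'), ('journal.dirstate', 'undo.dirstate')])
# so that closing it renames the journal files into the undo files later
# consumed by 'hg rollback' (paths illustrative).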

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
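
# e.g. undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
# (illustrative path)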

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True