localrepo: use lock.release for single lock
Simon Heimberg
r8646:60f9e574 default
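This change simplifies single-lock handling in localrepo.status(): instead of
pre-binding wlock to None and handing it to the None-tolerant release() helper
in an outer finally, the lock is acquired inside the try and released directly
through its own release() method, so the except error.LockError clause guards
only the non-blocking acquisition. A minimal standalone sketch of the
before/after pattern (LockError, acquire_wlock and _dummylock are illustrative
stand-ins, not Mercurial APIs):

class LockError(Exception):
    """Stand-in for mercurial.error.LockError (illustrative)."""

class _dummylock(object):
    """Stand-in for mercurial.lock.lock; only release() matters here."""
    def release(self):
        pass

def acquire_wlock(wait):
    """Pretend to take the working-directory lock without blocking."""
    return _dummylock()

def release(*locks):
    # roughly what mercurial's lock.release helper does:
    # tolerate locks that were never acquired
    for l in locks:
        if l is not None:
            l.release()

def old_pattern(fixup):
    # before: pre-bind to None so the outer finally can always
    # pass the lock to the None-tolerant helper
    wlock = None
    try:
        try:
            wlock = acquire_wlock(False)
            for f in fixup:
                pass  # work done under the lock
        except LockError:
            pass
    finally:
        release(wlock)

def new_pattern(fixup):
    # after: acquire inside the try; only the acquisition can raise
    # LockError, and the inner finally releases the lock directly
    try:
        wlock = acquire_wlock(False)
        try:
            for f in fixup:
                pass  # work done under the lock
        finally:
            wlock.release()
    except LockError:
        pass

old_pattern(['a'])
new_pattern(['a'])

Calling wlock.release() directly makes clear that exactly one lock exists and
only its acquisition can fail; the release(...) helper remains the right tool
where several possibly-None locks must be released together, as rollback()
below still does with release(lock, wlock).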
@@ -1,2134 +1,2132
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.

 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup
 import changelog, dirstate, filelog, manifest, context
 import lock, transaction, store, encoding
 import util, extensions, hook, error
 import match as match_
 import merge as merge_
 from lock import release
 import weakref, stat, errno, os, time, inspect
 propertycache = util.propertycache

 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
     supported = set('revlogv1 store fncache'.split())

     def __init__(self, baseui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(path)
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)

         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
                 requirements = ["revlogv1"]
                 if baseui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if baseui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                     # create an invalid changelog
                     self.opener("00changelog.i", "a").write(
                         '\0\0\0\2' # represents revlogv2
                         ' dummy changelog to prevent using the old repo layout'
                     )
                 reqfile = self.opener("requires", "w")
                 for r in requirements:
                     reqfile.write("%s\n" % r)
                 reqfile.close()
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
             requirements = set()
             try:
                 requirements = set(self.opener("requires").read().splitlines())
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
             for r in requirements - self.supported:
                 raise error.RepoError(_("requirement '%s' not supported") % r)

         self.store = store.store(requirements, self.path, util.opener)
         self.spath = self.store.path
         self.sopener = self.store.opener
         self.sjoin = self.store.join
         self.opener.createmode = self.store.createmode

         self.baseui = baseui
         self.ui = baseui.copy()
         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         self.tagscache = None
         self._tagstypecache = None
         self.branchcache = None
         self._ubranchcache = None # UTF-8 version of branchcache
         self._branchcachetip = None
         self.nodetagscache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

     @propertycache
     def changelog(self):
         c = changelog.changelog(self.sopener)
         if 'HG_PENDING' in os.environ:
             p = os.environ['HG_PENDING']
             if p.startswith(self.root):
                 c.readpending('00changelog.i.a')
         self.sopener.defversion = c.version
         return c

     @propertycache
     def manifest(self):
         return manifest.manifest(self.sopener)

     @propertycache
     def dirstate(self):
         return dirstate.dirstate(self.opener, self.ui, self.root)

     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         return context.changectx(self, changeid)

     def __nonzero__(self):
         return True

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         for i in xrange(len(self)):
             yield i

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         return hook.hook(self.ui, self, name, throw, **args)

     tag_disallowed = ':\r\n'

     def _tag(self, names, node, message, local, user, date, extra={}):
         if isinstance(names, str):
             allchars = names
             names = (names,)
         else:
             allchars = ''.join(names)
         for c in self.tag_disallowed:
             if c in allchars:
                 raise util.Abort(_('%r cannot be used in a tag name') % c)

         for name in names:
             self.hook('pretag', throw=True, node=hex(node), tag=name,
                       local=local)

         def writetags(fp, names, munge, prevtags):
             fp.seek(0, 2)
             if prevtags and prevtags[-1] != '\n':
                 fp.write('\n')
             for name in names:
                 m = munge and munge(name) or name
                 if self._tagstypecache and name in self._tagstypecache:
                     old = self.tagscache.get(name, nullid)
                     fp.write('%s %s\n' % (hex(old), m))
                 fp.write('%s %s\n' % (hex(node), m))
             fp.close()

         prevtags = ''
         if local:
             try:
                 fp = self.opener('localtags', 'r+')
             except IOError:
                 fp = self.opener('localtags', 'a')
             else:
                 prevtags = fp.read()

             # local tags are stored in the current charset
             writetags(fp, names, None, prevtags)
             for name in names:
                 self.hook('tag', node=hex(node), tag=name, local=local)
             return

         try:
             fp = self.wfile('.hgtags', 'rb+')
         except IOError:
             fp = self.wfile('.hgtags', 'ab')
         else:
             prevtags = fp.read()

         # committed tags are stored in UTF-8
         writetags(fp, names, encoding.fromlocal, prevtags)

         if '.hgtags' not in self.dirstate:
             self.add(['.hgtags'])

         tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)

         for name in names:
             self.hook('tag', node=hex(node), tag=name, local=local)

         return tagnode

     def tag(self, names, node, message, local, user, date):
         '''tag a revision with one or more symbolic names.

         names is a list of strings or, when adding a single tag, names may be a
         string.

         if local is True, the tags are stored in a per-repository file.
         otherwise, they are stored in the .hgtags file, and a new
         changeset is committed with the change.

         keyword arguments:

         local: whether to store tags in non-version-controlled file
         (default False)

         message: commit message to use if committing

         user: name of user to use if committing

         date: date tuple to use if committing'''

         for x in self.status()[:5]:
             if '.hgtags' in x:
                 raise util.Abort(_('working copy of .hgtags is changed '
                                    '(please commit .hgtags manually)'))

         self.tags() # instantiate the cache
         self._tag(names, node, message, local, user, date)

     def tags(self):
         '''return a mapping of tag to node'''
         if self.tagscache:
             return self.tagscache

         globaltags = {}
         tagtypes = {}

         def readtags(lines, fn, tagtype):
             filetags = {}
             count = 0

             def warn(msg):
                 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

             for l in lines:
                 count += 1
                 if not l:
                     continue
                 s = l.split(" ", 1)
                 if len(s) != 2:
                     warn(_("cannot parse entry"))
                     continue
                 node, key = s
                 key = encoding.tolocal(key.strip()) # stored in UTF-8
                 try:
                     bin_n = bin(node)
                 except TypeError:
                     warn(_("node '%s' is not well formed") % node)
                     continue
                 if bin_n not in self.changelog.nodemap:
                     warn(_("tag '%s' refers to unknown node") % key)
                     continue

                 h = []
                 if key in filetags:
                     n, h = filetags[key]
                     h.append(n)
                 filetags[key] = (bin_n, h)

             for k, nh in filetags.iteritems():
                 if k not in globaltags:
                     globaltags[k] = nh
                     tagtypes[k] = tagtype
                     continue

                 # we prefer the global tag if:
                 #  it supercedes us OR
                 #  mutual supercedes and it has a higher rank
                 # otherwise we win because we're tip-most
                 an, ah = nh
                 bn, bh = globaltags[k]
                 if (bn != an and an in bh and
                     (bn not in ah or len(bh) > len(ah))):
                     an = bn
                 ah.extend([n for n in bh if n not in ah])
                 globaltags[k] = an, ah
                 tagtypes[k] = tagtype

         # read the tags file from each head, ending with the tip
         f = None
         for rev, node, fnode in self._hgtagsnodes():
             f = (f and f.filectx(fnode) or
                  self.filectx('.hgtags', fileid=fnode))
             readtags(f.data().splitlines(), f, "global")

         try:
             data = encoding.fromlocal(self.opener("localtags").read())
             # localtags are stored in the local character set
             # while the internal tag table is stored in UTF-8
             readtags(data.splitlines(), "localtags", "local")
         except IOError:
             pass

         self.tagscache = {}
         self._tagstypecache = {}
         for k, nh in globaltags.iteritems():
             n = nh[0]
             if n != nullid:
                 self.tagscache[k] = n
             self._tagstypecache[k] = tagtypes[k]
         self.tagscache['tip'] = self.changelog.tip()
         return self.tagscache

     def tagtype(self, tagname):
         '''
         return the type of the given tag. result can be:

         'local'  : a local tag
         'global' : a global tag
         None     : tag does not exist
         '''

         self.tags()

         return self._tagstypecache.get(tagname)

     def _hgtagsnodes(self):
         last = {}
         ret = []
         for node in reversed(self.heads()):
             c = self[node]
             rev = c.rev()
             try:
                 fnode = c.filenode('.hgtags')
             except error.LookupError:
                 continue
             ret.append((rev, node, fnode))
             if fnode in last:
                 ret[last[fnode]] = None
             last[fnode] = len(ret) - 1
         return [item for item in ret if item]

     def tagslist(self):
         '''return a list of tags ordered by revision'''
         l = []
         for t, n in self.tags().iteritems():
             try:
                 r = self.changelog.rev(n)
             except:
                 r = -2 # sort to the beginning of the list if unknown
             l.append((r, t, n))
         return [(t, n) for r, t, n in sorted(l)]

     def nodetags(self, node):
         '''return the tags associated with a node'''
         if not self.nodetagscache:
             self.nodetagscache = {}
             for t, n in self.tags().iteritems():
                 self.nodetagscache.setdefault(n, []).append(t)
         return self.nodetagscache.get(node, [])

     def _branchtags(self, partial, lrev):
         # TODO: rename this function?
         tiprev = len(self) - 1
         if lrev != tiprev:
             self._updatebranchcache(partial, lrev+1, tiprev+1)
             self._writebranchcache(partial, self.changelog.tip(), tiprev)

         return partial

     def branchmap(self):
         tip = self.changelog.tip()
         if self.branchcache is not None and self._branchcachetip == tip:
             return self.branchcache

         oldtip = self._branchcachetip
         self._branchcachetip = tip
         if self.branchcache is None:
             self.branchcache = {} # avoid recursion in changectx
         else:
             self.branchcache.clear() # keep using the same dict
         if oldtip is None or oldtip not in self.changelog.nodemap:
             partial, last, lrev = self._readbranchcache()
         else:
             lrev = self.changelog.rev(oldtip)
             partial = self._ubranchcache

         self._branchtags(partial, lrev)
         # this private cache holds all heads (not just tips)
         self._ubranchcache = partial

         # the branch cache is stored on disk as UTF-8, but in the local
         # charset internally
         for k, v in partial.iteritems():
             self.branchcache[encoding.tolocal(k)] = v
         return self.branchcache


     def branchtags(self):
         '''return a dict where branch names map to the tipmost head of
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             head = None
             for i in range(len(heads)-1, -1, -1):
                 h = heads[i]
                 if 'close' not in self.changelog.read(h)[5]:
                     head = h
                     break
             # no open heads were found
             if head is None:
                 head = heads[-1]
             bt[bn] = head
         return bt


     def _readbranchcache(self):
         partial = {}
         try:
             f = self.opener("branchheads.cache")
             lines = f.read().split('\n')
             f.close()
         except (IOError, OSError):
             return {}, nullid, nullrev

         try:
             last, lrev = lines.pop(0).split(" ", 1)
             last, lrev = bin(last), int(lrev)
             if lrev >= len(self) or self[lrev].node() != last:
                 # invalidate the cache
                 raise ValueError('invalidating branch cache (tip differs)')
             for l in lines:
                 if not l: continue
                 node, label = l.split(" ", 1)
                 partial.setdefault(label.strip(), []).append(bin(node))
         except KeyboardInterrupt:
             raise
         except Exception, inst:
             if self.ui.debugflag:
                 self.ui.warn(str(inst), '\n')
             partial, last, lrev = {}, nullid, nullrev
         return partial, last, lrev

     def _writebranchcache(self, branches, tip, tiprev):
         try:
             f = self.opener("branchheads.cache", "w", atomictemp=True)
             f.write("%s %s\n" % (hex(tip), tiprev))
             for label, nodes in branches.iteritems():
                 for node in nodes:
                     f.write("%s %s\n" % (hex(node), label))
             f.rename()
         except (IOError, OSError):
             pass

     def _updatebranchcache(self, partial, start, end):
         for r in xrange(start, end):
             c = self[r]
             b = c.branch()
             bheads = partial.setdefault(b, [])
             bheads.append(c.node())
             for p in c.parents():
                 pn = p.node()
                 if pn in bheads:
                     bheads.remove(pn)

     def lookup(self, key):
         if isinstance(key, int):
             return self.changelog.node(key)
         elif key == '.':
             return self.dirstate.parents()[0]
         elif key == 'null':
             return nullid
         elif key == 'tip':
             return self.changelog.tip()
         n = self.changelog._match(key)
         if n:
             return n
         if key in self.tags():
             return self.tags()[key]
         if key in self.branchtags():
             return self.branchtags()[key]
         n = self.changelog._partialmatch(key)
         if n:
             return n

         # can't find key, check if it might have come from damaged dirstate
         if key in self.dirstate.parents():
             raise error.Abort(_("working directory has unknown parent '%s'!")
                               % short(key))
         try:
             if len(key) == 20:
                 key = hex(key)
         except:
             pass
         raise error.RepoError(_("unknown revision '%s'") % key)

     def local(self):
         return True

     def join(self, f):
         return os.path.join(self.path, f)

     def wjoin(self, f):
         return os.path.join(self.root, f)

     def rjoin(self, f):
         return os.path.join(self.root, util.pconvert(f))

     def file(self, f):
         if f[0] == '/':
             f = f[1:]
         return filelog.filelog(self.sopener, f)

     def changectx(self, changeid):
         return self[changeid]

     def parents(self, changeid=None):
         '''get list of changectxs for parents of changeid'''
         return self[changeid].parents()

     def filectx(self, path, changeid=None, fileid=None):
         """changeid can be a changeset revision, node, or tag.
            fileid can be a file revision or node."""
         return context.filectx(self, path, changeid, fileid)

     def getcwd(self):
         return self.dirstate.getcwd()

     def pathto(self, f, cwd=None):
         return self.dirstate.pathto(f, cwd)

     def wfile(self, f, mode='r'):
         return self.wopener(f, mode)

     def _link(self, f):
         return os.path.islink(self.wjoin(f))

     def _filter(self, filter, filename, data):
         if filter not in self.filterpats:
             l = []
             for pat, cmd in self.ui.configitems(filter):
                 if cmd == '!':
                     continue
                 mf = match_.match(self.root, '', [pat])
                 fn = None
                 params = cmd
                 for name, filterfn in self._datafilters.iteritems():
                     if cmd.startswith(name):
                         fn = filterfn
                         params = cmd[len(name):].lstrip()
                         break
                 if not fn:
                     fn = lambda s, c, **kwargs: util.filter(s, c)
                 # Wrap old filters not supporting keyword arguments
                 if not inspect.getargspec(fn)[2]:
                     oldfn = fn
                     fn = lambda s, c, **kwargs: oldfn(s, c)
                 l.append((mf, fn, params))
             self.filterpats[filter] = l

         for mf, fn, cmd in self.filterpats[filter]:
             if mf(filename):
                 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                 break

         return data

     def adddatafilter(self, name, filter):
         self._datafilters[name] = filter

     def wread(self, filename):
         if self._link(filename):
             data = os.readlink(self.wjoin(filename))
         else:
             data = self.wopener(filename, 'r').read()
         return self._filter("encode", filename, data)

     def wwrite(self, filename, data, flags):
         data = self._filter("decode", filename, data)
         try:
             os.unlink(self.wjoin(filename))
         except OSError:
             pass
         if 'l' in flags:
             self.wopener.symlink(data, filename)
         else:
             self.wopener(filename, 'w').write(data)
             if 'x' in flags:
                 util.set_flags(self.wjoin(filename), False, True)

     def wwritedata(self, filename, data):
         return self._filter("decode", filename, data)

     def transaction(self):
         tr = self._transref and self._transref() or None
         if tr and tr.running():
             return tr.nest()

         # abort here if the journal already exists
         if os.path.exists(self.sjoin("journal")):
             raise error.RepoError(_("journal already exists - run hg recover"))

         # save dirstate for rollback
         try:
             ds = self.opener("dirstate").read()
         except IOError:
             ds = ""
         self.opener("journal.dirstate", "w").write(ds)
         self.opener("journal.branch", "w").write(self.dirstate.branch())

         renames = [(self.sjoin("journal"), self.sjoin("undo")),
                    (self.join("journal.dirstate"), self.join("undo.dirstate")),
                    (self.join("journal.branch"), self.join("undo.branch"))]
         tr = transaction.transaction(self.ui.warn, self.sopener,
                                      self.sjoin("journal"),
                                      aftertrans(renames),
                                      self.store.createmode)
         self._transref = weakref.ref(tr)
         return tr

     def recover(self):
         lock = self.lock()
         try:
             if os.path.exists(self.sjoin("journal")):
                 self.ui.status(_("rolling back interrupted transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                 self.invalidate()
                 return True
             else:
                 self.ui.warn(_("no interrupted transaction available\n"))
                 return False
         finally:
             lock.release()

     def rollback(self):
         wlock = lock = None
         try:
             wlock = self.wlock()
             lock = self.lock()
             if os.path.exists(self.sjoin("undo")):
                 self.ui.status(_("rolling back last transaction\n"))
                 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                 try:
                     branch = self.opener("undo.branch").read()
                     self.dirstate.setbranch(branch)
                 except IOError:
                     self.ui.warn(_("Named branch could not be reset, "
                                    "current branch still is: %s\n")
                                  % encoding.tolocal(self.dirstate.branch()))
                 self.invalidate()
                 self.dirstate.invalidate()
             else:
                 self.ui.warn(_("no rollback information available\n"))
         finally:
             release(lock, wlock)

     def invalidate(self):
         for a in "changelog manifest".split():
             if a in self.__dict__:
                 delattr(self, a)
         self.tagscache = None
         self._tagstypecache = None
         self.nodetagscache = None
         self.branchcache = None
         self._ubranchcache = None
         self._branchcachetip = None

     def _lock(self, lockname, wait, releasefn, acquirefn, desc):
         try:
             l = lock.lock(lockname, 0, releasefn, desc=desc)
         except error.LockHeld, inst:
             if not wait:
                 raise
             self.ui.warn(_("waiting for lock on %s held by %r\n") %
                          (desc, inst.locker))
             # default to 600 seconds timeout
             l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                           releasefn, desc=desc)
         if acquirefn:
             acquirefn()
         return l

     def lock(self, wait=True):
         l = self._lockref and self._lockref()
         if l is not None and l.held:
             l.lock()
             return l

         l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                        _('repository %s') % self.origroot)
         self._lockref = weakref.ref(l)
         return l

     def wlock(self, wait=True):
         l = self._wlockref and self._wlockref()
         if l is not None and l.held:
             l.lock()
             return l

         l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                        self.dirstate.invalidate, _('working directory of %s') %
                        self.origroot)
         self._wlockref = weakref.ref(l)
         return l

     def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
         """
         commit an individual file as part of a larger transaction
         """

         fname = fctx.path()
         text = fctx.data()
         flog = self.file(fname)
         fparent1 = manifest1.get(fname, nullid)
         fparent2 = fparent2o = manifest2.get(fname, nullid)

         meta = {}
         copy = fctx.renamed()
         if copy and copy[0] != fname:
             # Mark the new revision of this file as a copy of another
             # file.  This copy data will effectively act as a parent
             # of this new revision.  If this is a merge, the first
             # parent will be the nullid (meaning "look up the copy data")
             # and the second one will be the other parent.  For example:
             #
             # 0 --- 1 --- 3   rev1 changes file foo
             #   \       /     rev2 renames foo to bar and changes it
             #    \- 2 -/      rev3 should have bar with all changes and
             #                      should record that bar descends from
             #                      bar in rev2 and foo in rev1
             #
             # this allows this merge to succeed:
             #
             # 0 --- 1 --- 3   rev4 reverts the content change from rev2
             #   \       /     merging rev3 and rev4 should use bar@rev2
             #    \- 2 --- 4        as the merge base
             #

             cfname = copy[0]
             crev = manifest1.get(cfname)
             newfparent = fparent2

             if manifest2: # branch merge
                 if fparent2 == nullid or crev is None: # copied on remote side
                     if cfname in manifest2:
                         crev = manifest2[cfname]
                         newfparent = fparent1

             # find source in nearest ancestor if we've lost track
             if not crev:
                 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                               (fname, cfname))
                 for ancestor in self['.'].ancestors():
                     if cfname in ancestor:
                         crev = ancestor[cfname].filenode()
                         break

             self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
             meta["copy"] = cfname
             meta["copyrev"] = hex(crev)
             fparent1, fparent2 = nullid, newfparent
         elif fparent2 != nullid:
             # is one parent an ancestor of the other?
             fparentancestor = flog.ancestor(fparent1, fparent2)
             if fparentancestor == fparent1:
                 fparent1, fparent2 = fparent2, nullid
             elif fparentancestor == fparent2:
                 fparent2 = nullid

         # is the file changed?
         if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
             changelist.append(fname)
             return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

         # are just the flags changed during merge?
         if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
             changelist.append(fname)

         return fparent1

     def commit(self, files=None, text="", user=None, date=None, match=None,
                force=False, editor=False, extra={}):
         """Add a new revision to current repository.

         Revision information is gathered from the working directory, files and
         match can be used to filter the committed files.
         If editor is supplied, it is called to get a commit message.
         """
         wlock = self.wlock()
         try:
             p1, p2 = self.dirstate.parents()

             if (not force and p2 != nullid and match and
                 (match.files() or match.anypats())):
                 raise util.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

             if files:
                 modified, removed = [], []
                 for f in sorted(set(files)):
                     s = self.dirstate[f]
                     if s in 'nma':
                         modified.append(f)
                     elif s == 'r':
                         removed.append(f)
                     else:
                         self.ui.warn(_("%s not tracked!\n") % f)
                 changes = [modified, [], removed, [], []]
             else:
                 changes = self.status(match=match)

             if (not force and not extra.get("close") and p2 == nullid
                 and not (changes[0] or changes[1] or changes[2])
                 and self[None].branch() == self['.'].branch()):
                 self.ui.status(_("nothing changed\n"))
                 return None

             ms = merge_.mergestate(self)
             for f in changes[0]:
                 if f in ms and ms[f] == 'u':
                     raise util.Abort(_("unresolved merge conflicts "
                                        "(see hg resolve)"))

             wctx = context.workingctx(self, (p1, p2), text, user, date,
                                       extra, changes)
             if editor:
                 wctx._text = editor(self, wctx,
                                     changes[1], changes[0], changes[2])
             ret = self.commitctx(wctx, True)

             # update dirstate and mergestate
             for f in changes[0] + changes[1]:
                 self.dirstate.normal(f)
             for f in changes[2]:
                 self.dirstate.forget(f)
             self.dirstate.setparents(ret)
             ms.reset()

             return ret

         finally:
             wlock.release()

     def commitctx(self, ctx, error=False):
         """Add a new revision to current repository.

         Revision information is passed via the context argument.
         """

         tr = lock = None
         removed = ctx.removed()
         p1, p2 = ctx.p1(), ctx.p2()
         m1 = p1.manifest().copy()
         m2 = p2.manifest()
         user = ctx.user()

         xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
         self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

         lock = self.lock()
         try:
             tr = self.transaction()
             trp = weakref.proxy(tr)

             # check in files
             new = {}
             changed = []
             linkrev = len(self)
             for f in sorted(ctx.modified() + ctx.added()):
                 self.ui.note(f + "\n")
                 try:
                     fctx = ctx[f]
                     new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                               changed)
                     m1.set(f, fctx.flags())
                 except (OSError, IOError):
                     if error:
                         self.ui.warn(_("trouble committing %s!\n") % f)
                         raise
                     else:
                         removed.append(f)

             # update manifest
             m1.update(new)
             removed = [f for f in sorted(removed) if f in m1 or f in m2]
             drop = [f for f in removed if f in m1]
             for f in drop:
                 del m1[f]
             mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                    p2.manifestnode(), (new, drop))

             # update changelog
             self.changelog.delayupdate()
             n = self.changelog.add(mn, changed + removed, ctx.description(),
                                    trp, p1.node(), p2.node(),
                                    user, ctx.date(), ctx.extra().copy())
             p = lambda: self.changelog.writepending() and self.root or ""
             self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                       parent2=xp2, pending=p)
             self.changelog.finalize(trp)
             tr.close()

             if self.branchcache:
                 self.branchtags()

             self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
             return n
         finally:
             del tr
             lock.release()

     def walk(self, match, node=None):
         '''
         walk recursively through the directory tree or a given
         changeset, finding all files matched by the match
         function
         '''
         return self[node].walk(match)

     def status(self, node1='.', node2=None, match=None,
                ignored=False, clean=False, unknown=False):
         """return status of files between two nodes or node and working directory

         If node1 is None, use the first dirstate parent instead.
         If node2 is None, compare node1 with working directory.
         """

         def mfmatches(ctx):
             mf = ctx.manifest().copy()
             for fn in mf.keys():
                 if not match(fn):
                     del mf[fn]
             return mf

         if isinstance(node1, context.changectx):
             ctx1 = node1
         else:
             ctx1 = self[node1]
         if isinstance(node2, context.changectx):
             ctx2 = node2
         else:
             ctx2 = self[node2]

         working = ctx2.rev() is None
         parentworking = working and ctx1 == self['.']
         match = match or match_.always(self.root, self.getcwd())
         listignored, listclean, listunknown = ignored, clean, unknown

         # load earliest manifest first for caching reasons
         if not working and ctx2.rev() < ctx1.rev():
             ctx2.manifest()

         if not parentworking:
             def bad(f, msg):
                 if f not in ctx1:
                     self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                 return False
             match.bad = bad

         if working: # we need to scan the working dir
             s = self.dirstate.status(match, listignored, listclean, listunknown)
             cmp, modified, added, removed, deleted, unknown, ignored, clean = s

             # check for any possibly clean files
             if parentworking and cmp:
                 fixup = []
                 # do a full compare of any files that might have changed
                 for f in sorted(cmp):
                     if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                         or ctx1[f].cmp(ctx2[f].data())):
                         modified.append(f)
                     else:
                         fixup.append(f)

                 if listclean:
                     clean += fixup

                 # update dirstate for files that are actually clean
                 if fixup:
-                    wlock = None
-                    try:
-                        try:
-                            # updating the dirstate is optional
-                            # so we don't wait on the lock
-                            wlock = self.wlock(False)
-                            for f in fixup:
-                                self.dirstate.normal(f)
-                        except error.LockError:
-                            pass
-                    finally:
-                        release(wlock)
+                    try:
+                        wlock = self.wlock(False)
+                        try:
+                            # updating the dirstate is optional
+                            # so we don't wait on the lock
+                            for f in fixup:
+                                self.dirstate.normal(f)
+                        finally:
+                            wlock.release()
+                    except error.LockError:
+                        pass

987 if not parentworking:
986 if not parentworking:
988 mf1 = mfmatches(ctx1)
987 mf1 = mfmatches(ctx1)
989 if working:
988 if working:
990 # we are comparing working dir against non-parent
989 # we are comparing working dir against non-parent
991 # generate a pseudo-manifest for the working dir
990 # generate a pseudo-manifest for the working dir
992 mf2 = mfmatches(self['.'])
991 mf2 = mfmatches(self['.'])
993 for f in cmp + modified + added:
992 for f in cmp + modified + added:
994 mf2[f] = None
993 mf2[f] = None
995 mf2.set(f, ctx2.flags(f))
994 mf2.set(f, ctx2.flags(f))
996 for f in removed:
995 for f in removed:
997 if f in mf2:
996 if f in mf2:
998 del mf2[f]
997 del mf2[f]
999 else:
998 else:
1000 # we are comparing two revisions
999 # we are comparing two revisions
1001 deleted, unknown, ignored = [], [], []
1000 deleted, unknown, ignored = [], [], []
1002 mf2 = mfmatches(ctx2)
1001 mf2 = mfmatches(ctx2)
1003
1002
1004 modified, added, clean = [], [], []
1003 modified, added, clean = [], [], []
1005 for fn in mf2:
1004 for fn in mf2:
1006 if fn in mf1:
1005 if fn in mf1:
1007 if (mf1.flags(fn) != mf2.flags(fn) or
1006 if (mf1.flags(fn) != mf2.flags(fn) or
1008 (mf1[fn] != mf2[fn] and
1007 (mf1[fn] != mf2[fn] and
1009 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1008 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1010 modified.append(fn)
1009 modified.append(fn)
1011 elif listclean:
1010 elif listclean:
1012 clean.append(fn)
1011 clean.append(fn)
1013 del mf1[fn]
1012 del mf1[fn]
1014 else:
1013 else:
1015 added.append(fn)
1014 added.append(fn)
1016 removed = mf1.keys()
1015 removed = mf1.keys()
1017
1016
1018 r = modified, added, removed, deleted, unknown, ignored, clean
1017 r = modified, added, removed, deleted, unknown, ignored, clean
1019 [l.sort() for l in r]
1018 [l.sort() for l in r]
1020 return r
1019 return r
1021
1020
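For readers coming at this from the API side, a minimal sketch of consuming the seven-list tuple that status() returns; how the repository is opened here is an assumption for illustration, not part of this change:

    from mercurial import hg, ui as uimod

    # assumes the current directory is a Mercurial checkout
    repo = hg.repository(uimod.ui(), '.')
    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(unknown=True, clean=True)
    for f in modified:
        print 'M %s' % f
    for f in clean:
        print 'C %s' % f
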
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                   % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
-        wlock = None
-        try:
-            if unlink:
-                for f in list:
-                    try:
-                        util.unlink(self.wjoin(f))
-                    except OSError, inst:
-                        if inst.errno != errno.ENOENT:
-                            raise
-            wlock = self.wlock()
+        if unlink:
+            for f in list:
+                try:
+                    util.unlink(self.wjoin(f))
+                except OSError, inst:
+                    if inst.errno != errno.ENOENT:
+                        raise
+        wlock = self.wlock()
+        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
-            release(wlock)
+            wlock.release()

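The theme of this changeset is visible in both hunks above: a lock that is acquired unconditionally no longer needs the `wlock = None` sentinel plus the `release()` helper, and can use a plain try/finally on the lock object itself. The idiom in isolation, as a sketch with a hypothetical mutation callback:

    def with_wlock(repo, touch_dirstate):
        # Acquire the working-directory lock, run the mutation, and always
        # release -- the single-lock pattern this changeset standardizes on.
        wlock = repo.wlock()
        try:
            touch_dirstate(repo)
        finally:
            wlock.release()
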
    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None, closed=True):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in sorted(heads)]

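The sort above relies on negating the revision number so that a plain ascending sorted() yields heads newest-first. The same trick on bare integers:

    revs = [3, 11, 7]
    pairs = [(-r, r) for r in revs]
    print [n for (r, n) in sorted(pairs)]   # prints [11, 7, 3]
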
    def branchheads(self, branch=None, start=None, closed=True):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

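between() thins each top-to-bottom chain by keeping only the nodes whose distance from the top is a power of two (1, 2, 4, 8, ...), which is what lets the discovery code below narrow a branch range in a logarithmic number of rounds. The same sampling logic on a toy chain of integers, for illustration only:

    def sample(chain):
        # keep the elements at positions 1, 2, 4, 8, ... exactly as the
        # inner loop of between() does
        l, f, i = [], 1, 0
        for n in chain:
            if i == f:
                l.append(n)
                f *= 2
            i += 1
        return l

    print sample(range(20))   # prints [1, 2, 4, 8, 16]
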
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        self and remote but whose children do not exist in both self and
        remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        self and remote but whose children do not exist in both self and
        remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

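The pruning loop above is a breadth-first ancestor walk: start from the common nodes and strip them, and everything they descend from, out of `remain`. The same walk over a toy parent map (made-up data, for illustration):

    parents = {'a': [], 'b': ['a'], 'c': ['b'], 'x': []}
    remain = set(parents)
    remove = ['b']                      # pretend 'b' is known to the remote
    while remove:
        n = remove.pop(0)
        if n in remain:
            remain.remove(n)
            remove.extend(parents[n])   # queue the ancestors too
    print sorted(remain)                # prints ['c', 'x']
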
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

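The dispatch above in miniature: the strategy is chosen purely from the remote's advertised capabilities. A sketch, with `other` standing in for any peer object exposing capable():

    def pick_push_strategy(other):
        if other.capable('unbundle'):
            # remote applies the bundle itself; we never lock it
            return 'push_unbundle'
        # we are expected to lock the remote and add the changegroup directly
        return 'push_addchangegroup'
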
    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n" %
                                   self[updatelh[0]].branch()))
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

1570 def changegroupsubset(self, bases, heads, source, extranodes=None):
1568 def changegroupsubset(self, bases, heads, source, extranodes=None):
1571 """This function generates a changegroup consisting of all the nodes
1569 """This function generates a changegroup consisting of all the nodes
1572 that are descendents of any of the bases, and ancestors of any of
1570 that are descendents of any of the bases, and ancestors of any of
1573 the heads.
1571 the heads.
1574
1572
1575 It is fairly complex as determining which filenodes and which
1573 It is fairly complex as determining which filenodes and which
1576 manifest nodes need to be included for the changeset to be complete
1574 manifest nodes need to be included for the changeset to be complete
1577 is non-trivial.
1575 is non-trivial.
1578
1576
1579 Another wrinkle is doing the reverse, figuring out which changeset in
1577 Another wrinkle is doing the reverse, figuring out which changeset in
1580 the changegroup a particular filenode or manifestnode belongs to.
1578 the changegroup a particular filenode or manifestnode belongs to.
1581
1579
1582 The caller can specify some nodes that must be included in the
1580 The caller can specify some nodes that must be included in the
1583 changegroup using the extranodes argument. It should be a dict
1581 changegroup using the extranodes argument. It should be a dict
1584 where the keys are the filenames (or 1 for the manifest), and the
1582 where the keys are the filenames (or 1 for the manifest), and the
1585 values are lists of (node, linknode) tuples, where node is a wanted
1583 values are lists of (node, linknode) tuples, where node is a wanted
1586 node and linknode is the changelog node that should be transmitted as
1584 node and linknode is the changelog node that should be transmitted as
1587 the linkrev.
1585 the linkrev.
1588 """
1586 """
1589
1587
1590 if extranodes is None:
1588 if extranodes is None:
1591 # can we go through the fast path ?
1589 # can we go through the fast path ?
1592 heads.sort()
1590 heads.sort()
1593 allheads = self.heads()
1591 allheads = self.heads()
1594 allheads.sort()
1592 allheads.sort()
1595 if heads == allheads:
1593 if heads == allheads:
1596 common = []
1594 common = []
1597 # parents of bases are known from both sides
1595 # parents of bases are known from both sides
1598 for n in bases:
1596 for n in bases:
1599 for p in self.changelog.parents(n):
1597 for p in self.changelog.parents(n):
1600 if p != nullid:
1598 if p != nullid:
1601 common.append(p)
1599 common.append(p)
1602 return self._changegroup(common, source)
1600 return self._changegroup(common, source)
1603
1601
1604 self.hook('preoutgoing', throw=True, source=source)
1602 self.hook('preoutgoing', throw=True, source=source)
1605
1603
1606 # Set up some initial variables
1604 # Set up some initial variables
1607 # Make it easy to refer to self.changelog
1605 # Make it easy to refer to self.changelog
1608 cl = self.changelog
1606 cl = self.changelog
1609 # msng is short for missing - compute the list of changesets in this
1607 # msng is short for missing - compute the list of changesets in this
1610 # changegroup.
1608 # changegroup.
1611 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1609 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1612 self.changegroupinfo(msng_cl_lst, source)
1610 self.changegroupinfo(msng_cl_lst, source)
1613 # Some bases may turn out to be superfluous, and some heads may be
1611 # Some bases may turn out to be superfluous, and some heads may be
1614 # too. nodesbetween will return the minimal set of bases and heads
1612 # too. nodesbetween will return the minimal set of bases and heads
1615 # necessary to re-create the changegroup.
1613 # necessary to re-create the changegroup.
1616
1614
1617 # Known heads are the list of heads that it is assumed the recipient
1615 # Known heads are the list of heads that it is assumed the recipient
1618 # of this changegroup will know about.
1616 # of this changegroup will know about.
1619 knownheads = set()
1617 knownheads = set()
1620 # We assume that all parents of bases are known heads.
1618 # We assume that all parents of bases are known heads.
1621 for n in bases:
1619 for n in bases:
1622 knownheads.update(cl.parents(n))
1620 knownheads.update(cl.parents(n))
1623 knownheads.discard(nullid)
1621 knownheads.discard(nullid)
1624 knownheads = list(knownheads)
1622 knownheads = list(knownheads)
1625 if knownheads:
1623 if knownheads:
1626 # Now that we know what heads are known, we can compute which
1624 # Now that we know what heads are known, we can compute which
1627 # changesets are known. The recipient must know about all
1625 # changesets are known. The recipient must know about all
1628 # changesets required to reach the known heads from the null
1626 # changesets required to reach the known heads from the null
1629 # changeset.
1627 # changeset.
1630 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1628 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1631 junk = None
1629 junk = None
1632 # Transform the list into a set.
1630 # Transform the list into a set.
1633 has_cl_set = set(has_cl_set)
1631 has_cl_set = set(has_cl_set)
1634 else:
1632 else:
1635 # If there were no known heads, the recipient cannot be assumed to
1633 # If there were no known heads, the recipient cannot be assumed to
1636 # know about any changesets.
1634 # know about any changesets.
1637 has_cl_set = set()
1635 has_cl_set = set()
1638
1636
1639 # Make it easy to refer to self.manifest
1637 # Make it easy to refer to self.manifest
1640 mnfst = self.manifest
1638 mnfst = self.manifest
1641 # We don't know which manifests are missing yet
1639 # We don't know which manifests are missing yet
1642 msng_mnfst_set = {}
1640 msng_mnfst_set = {}
1643 # Nor do we know which filenodes are missing.
1641 # Nor do we know which filenodes are missing.
1644 msng_filenode_set = {}
1642 msng_filenode_set = {}
1645
1643
1646 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1644 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1647 junk = None
1645 junk = None
1648
1646
1649 # A changeset always belongs to itself, so the changenode lookup
1647 # A changeset always belongs to itself, so the changenode lookup
1650 # function for a changenode is identity.
1648 # function for a changenode is identity.
1651 def identity(x):
1649 def identity(x):
1652 return x
1650 return x
1653
1651
1654 # A function generating function. Sets up an environment for the
1652 # A function generating function. Sets up an environment for the
1655 # inner function.
1653 # inner function.
1656 def cmp_by_rev_func(revlog):
1654 def cmp_by_rev_func(revlog):
1657 # Compare two nodes by their revision number in the environment's
1655 # Compare two nodes by their revision number in the environment's
1658 # revision history. Since the revision number both represents the
1656 # revision history. Since the revision number both represents the
1659 # most efficient order to read the nodes in, and represents a
1657 # most efficient order to read the nodes in, and represents a
1660 # topological sorting of the nodes, this function is often useful.
1658 # topological sorting of the nodes, this function is often useful.
1661 def cmp_by_rev(a, b):
1659 def cmp_by_rev(a, b):
1662 return cmp(revlog.rev(a), revlog.rev(b))
1660 return cmp(revlog.rev(a), revlog.rev(b))
1663 return cmp_by_rev
1661 return cmp_by_rev
1664
1662
1665 # If we determine that a particular file or manifest node must be a
1663 # If we determine that a particular file or manifest node must be a
1666 # node that the recipient of the changegroup will already have, we can
1664 # node that the recipient of the changegroup will already have, we can
1667 # also assume the recipient will have all the parents. This function
1665 # also assume the recipient will have all the parents. This function
1668 # prunes them from the set of missing nodes.
1666 # prunes them from the set of missing nodes.
1669 def prune_parents(revlog, hasset, msngset):
1667 def prune_parents(revlog, hasset, msngset):
1670 haslst = list(hasset)
1668 haslst = list(hasset)
1671 haslst.sort(cmp_by_rev_func(revlog))
1669 haslst.sort(cmp_by_rev_func(revlog))
1672 for node in haslst:
1670 for node in haslst:
1673 parentlst = [p for p in revlog.parents(node) if p != nullid]
1671 parentlst = [p for p in revlog.parents(node) if p != nullid]
1674 while parentlst:
1672 while parentlst:
1675 n = parentlst.pop()
1673 n = parentlst.pop()
1676 if n not in hasset:
1674 if n not in hasset:
1677 hasset.add(n)
1675 hasset.add(n)
1678 p = [p for p in revlog.parents(n) if p != nullid]
1676 p = [p for p in revlog.parents(n) if p != nullid]
1679 parentlst.extend(p)
1677 parentlst.extend(p)
1680 for n in hasset:
1678 for n in hasset:
1681 msngset.pop(n, None)
1679 msngset.pop(n, None)
1682
1680
1683 # This is a function generating function used to set up an environment
1681 # This is a function generating function used to set up an environment
1684 # for the inner function to execute in.
1682 # for the inner function to execute in.
1685 def manifest_and_file_collector(changedfileset):
1683 def manifest_and_file_collector(changedfileset):
1686 # This is an information gathering function that gathers
1684 # This is an information gathering function that gathers
1687 # information from each changeset node that goes out as part of
1685 # information from each changeset node that goes out as part of
1688 # the changegroup. The information gathered is a list of which
1686 # the changegroup. The information gathered is a list of which
1689 # manifest nodes are potentially required (the recipient may
1687 # manifest nodes are potentially required (the recipient may
1690 # already have them) and total list of all files which were
1688 # already have them) and total list of all files which were
1691 # changed in any changeset in the changegroup.
1689 # changed in any changeset in the changegroup.
1692 #
1690 #
1693 # We also remember the first changenode we saw any manifest
1691 # We also remember the first changenode we saw any manifest
1694 # referenced by so we can later determine which changenode 'owns'
1692 # referenced by so we can later determine which changenode 'owns'
1695 # the manifest.
1693 # the manifest.
1696 def collect_manifests_and_files(clnode):
1694 def collect_manifests_and_files(clnode):
1697 c = cl.read(clnode)
1695 c = cl.read(clnode)
1698 for f in c[3]:
1696 for f in c[3]:
1699 # This is to make sure we only have one instance of each
1697 # This is to make sure we only have one instance of each
1700 # filename string for each filename.
1698 # filename string for each filename.
1701 changedfileset.setdefault(f, f)
1699 changedfileset.setdefault(f, f)
1702 msng_mnfst_set.setdefault(c[0], clnode)
1700 msng_mnfst_set.setdefault(c[0], clnode)
1703 return collect_manifests_and_files
1701 return collect_manifests_and_files
1704
1702
1705 # Figure out which manifest nodes (of the ones we think might be part
1703 # Figure out which manifest nodes (of the ones we think might be part
1706 # of the changegroup) the recipient must know about and remove them
1704 # of the changegroup) the recipient must know about and remove them
1707 # from the changegroup.
1705 # from the changegroup.
1708 def prune_manifests():
1706 def prune_manifests():
1709 has_mnfst_set = set()
1707 has_mnfst_set = set()
1710 for n in msng_mnfst_set:
1708 for n in msng_mnfst_set:
1711 # If a 'missing' manifest thinks it belongs to a changenode
1709 # If a 'missing' manifest thinks it belongs to a changenode
1712 # the recipient is assumed to have, obviously the recipient
1710 # the recipient is assumed to have, obviously the recipient
1713 # must have that manifest.
1711 # must have that manifest.
1714 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1712 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1715 if linknode in has_cl_set:
1713 if linknode in has_cl_set:
1716 has_mnfst_set.add(n)
1714 has_mnfst_set.add(n)
1717 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1715 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1718
1716
1719 # Use the information collected in collect_manifests_and_files to say
1717 # Use the information collected in collect_manifests_and_files to say
1720 # which changenode any manifestnode belongs to.
1718 # which changenode any manifestnode belongs to.
1721 def lookup_manifest_link(mnfstnode):
1719 def lookup_manifest_link(mnfstnode):
1722 return msng_mnfst_set[mnfstnode]
1720 return msng_mnfst_set[mnfstnode]
1723
1721
1724 # A function generating function that sets up the initial environment
1722 # A function generating function that sets up the initial environment
1725 # the inner function.
1723 # the inner function.
1726 def filenode_collector(changedfiles):
1724 def filenode_collector(changedfiles):
1727 next_rev = [0]
1725 next_rev = [0]
1728 # This gathers information from each manifestnode included in the
1726 # This gathers information from each manifestnode included in the
1729 # changegroup about which filenodes the manifest node references
1727 # changegroup about which filenodes the manifest node references
1730 # so we can include those in the changegroup too.
1728 # so we can include those in the changegroup too.
1731 #
1729 #
1732 # It also remembers which changenode each filenode belongs to. It
1730 # It also remembers which changenode each filenode belongs to. It
1733 # does this by assuming the a filenode belongs to the changenode
1731 # does this by assuming the a filenode belongs to the changenode
1734 # the first manifest that references it belongs to.
1732 # the first manifest that references it belongs to.
1735 def collect_msng_filenodes(mnfstnode):
1733 def collect_msng_filenodes(mnfstnode):
1736 r = mnfst.rev(mnfstnode)
1734 r = mnfst.rev(mnfstnode)
1737 if r == next_rev[0]:
1735 if r == next_rev[0]:
1738 # If the last rev we looked at was the one just previous,
1736 # If the last rev we looked at was the one just previous,
1739 # we only need to see a diff.
1737 # we only need to see a diff.
1740 deltamf = mnfst.readdelta(mnfstnode)
1738 deltamf = mnfst.readdelta(mnfstnode)
1741 # For each line in the delta
1739 # For each line in the delta
1742 for f, fnode in deltamf.iteritems():
1740 for f, fnode in deltamf.iteritems():
1743 f = changedfiles.get(f, None)
1741 f = changedfiles.get(f, None)
1744 # And if the file is in the list of files we care
1742 # And if the file is in the list of files we care
1745 # about.
1743 # about.
1746 if f is not None:
1744 if f is not None:
1747 # Get the changenode this manifest belongs to
1745 # Get the changenode this manifest belongs to
1748 clnode = msng_mnfst_set[mnfstnode]
1746 clnode = msng_mnfst_set[mnfstnode]
1749 # Create the set of filenodes for the file if
1747 # Create the set of filenodes for the file if
1750 # there isn't one already.
1748 # there isn't one already.
1751 ndset = msng_filenode_set.setdefault(f, {})
1749 ndset = msng_filenode_set.setdefault(f, {})
1752 # And set the filenode's changelog node to the
1750 # And set the filenode's changelog node to the
1753 # manifest's if it hasn't been set already.
1751 # manifest's if it hasn't been set already.
1754 ndset.setdefault(fnode, clnode)
1752 ndset.setdefault(fnode, clnode)
1755 else:
1753 else:
1756 # Otherwise we need a full manifest.
1754 # Otherwise we need a full manifest.
1757 m = mnfst.read(mnfstnode)
1755 m = mnfst.read(mnfstnode)
1758 # For every file in we care about.
1756 # For every file in we care about.
1759 for f in changedfiles:
1757 for f in changedfiles:
1760 fnode = m.get(f, None)
1758 fnode = m.get(f, None)
1761 # If it's in the manifest
1759 # If it's in the manifest
1762 if fnode is not None:
1760 if fnode is not None:
1763 # See comments above.
1761 # See comments above.
1764 clnode = msng_mnfst_set[mnfstnode]
1762 clnode = msng_mnfst_set[mnfstnode]
1765 ndset = msng_filenode_set.setdefault(f, {})
1763 ndset = msng_filenode_set.setdefault(f, {})
1766 ndset.setdefault(fnode, clnode)
1764 ndset.setdefault(fnode, clnode)
1767 # Remember the revision we hope to see next.
1765 # Remember the revision we hope to see next.
1768 next_rev[0] = r + 1
1766 next_rev[0] = r + 1
1769 return collect_msng_filenodes
1767 return collect_msng_filenodes
1770
1768
1771 # We have a list of filenodes we think we need for a file, lets remove
1769 # We have a list of filenodes we think we need for a file, lets remove
1772 # all those we know the recipient must have.
1770 # all those we know the recipient must have.
1773 def prune_filenodes(f, filerevlog):
1771 def prune_filenodes(f, filerevlog):
1774 msngset = msng_filenode_set[f]
1772 msngset = msng_filenode_set[f]
1775 hasset = set()
1773 hasset = set()
1776 # If a 'missing' filenode thinks it belongs to a changenode we
1774 # If a 'missing' filenode thinks it belongs to a changenode we
1777 # assume the recipient must have, then the recipient must have
1775 # assume the recipient must have, then the recipient must have
1778 # that filenode.
1776 # that filenode.
1779 for n in msngset:
1777 for n in msngset:
1780 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1778 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1781 if clnode in has_cl_set:
1779 if clnode in has_cl_set:
1782 hasset.add(n)
1780 hasset.add(n)
1783 prune_parents(filerevlog, hasset, msngset)
1781 prune_parents(filerevlog, hasset, msngset)
1784
1782
1785 # A function generator function that sets up the a context for the
1783 # A function generator function that sets up the a context for the
1786 # inner function.
1784 # inner function.
1787 def lookup_filenode_link_func(fname):
1785 def lookup_filenode_link_func(fname):
1788 msngset = msng_filenode_set[fname]
1786 msngset = msng_filenode_set[fname]
1789 # Lookup the changenode the filenode belongs to.
1787 # Lookup the changenode the filenode belongs to.
1790 def lookup_filenode_link(fnode):
1788 def lookup_filenode_link(fnode):
1791 return msngset[fnode]
1789 return msngset[fnode]
1792 return lookup_filenode_link
1790 return lookup_filenode_link
1793
1791
1794 # Add the nodes that were explicitly requested.
1792 # Add the nodes that were explicitly requested.
1795 def add_extra_nodes(name, nodes):
1793 def add_extra_nodes(name, nodes):
1796 if not extranodes or name not in extranodes:
1794 if not extranodes or name not in extranodes:
1797 return
1795 return
1798
1796
1799 for node, linknode in extranodes[name]:
1797 for node, linknode in extranodes[name]:
1800 if node not in nodes:
1798 if node not in nodes:
1801 nodes[node] = linknode
1799 nodes[node] = linknode
1802
1800
        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifest nodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifest nodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order, sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function, as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

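The generator above frames everything as length-prefixed chunks: a changelog group, then a manifest group, then one group per changed file, each file group introduced by a chunk holding the file name. Each group ends with an empty chunk, and a final empty chunk in place of a file name ends the stream. A hedged sketch of a reader for that framing; readchunk and scan_changegroup are illustrative names, not Mercurial API, and the layout assumed is the uncompressed chunk format produced here:

import struct

def readchunk(fp):
    # Each chunk is a 4-byte big-endian length (counting the 4 length
    # bytes themselves) followed by the payload; length <= 4 means an
    # empty chunk, which closes the current group.
    length = struct.unpack(">l", fp.read(4))[0]
    if length <= 4:
        return ""
    return fp.read(length - 4)

def scan_changegroup(fp):
    # Count the chunks in each section: changelog, manifest, then one
    # group per file, each introduced by a chunk holding the file name.
    clchunks = len(list(iter(lambda: readchunk(fp), "")))
    mnchunks = len(list(iter(lambda: readchunk(fp), "")))
    files = {}
    while True:
        fname = readchunk(fp)
        if not fname:               # empty chunk: no more file groups
            break
        files[fname] = len(list(iter(lambda: readchunk(fp), "")))
    return clchunks, mnchunks, files
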
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function, as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct the set of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

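The lookuprevlink_func() pattern above builds one lookup closure per revlog, each mapping a node to the changelog node named by its linkrev. A toy model of that closure, where FakeLog and cl_nodes are invented for illustration and node identifiers are simply revision numbers:

cl_nodes = ['c0', 'c1', 'c2']       # stand-ins for changelog nodes

class FakeLog(object):
    def __init__(self, linkrevs):
        self.linkrevs_ = linkrevs   # rev -> changelog rev
    def rev(self, n):
        return n                    # in this toy, a node *is* its rev
    def linkrev(self, r):
        return self.linkrevs_[r]

def lookuprevlink_func(revlog):
    def lookuprevlink(n):
        return cl_nodes[revlog.linkrev(revlog.rev(n))]
    return lookuprevlink

lookup = lookuprevlink_func(FakeLog({0: 2}))
assert lookup(0) == 'c2'            # filelog rev 0 was introduced by c2
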
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for an empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

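The return-value convention documented in the addchangegroup() docstring packs both the direction and the magnitude of the head-count change into a single integer. A hedged sketch of decoding it on the caller's side; describe_result is a hypothetical helper, not part of this module:

def describe_result(ret):
    # Inverse of the encoding above: 0 = no-op, 1 = same head count,
    # 2..n = (ret - 1) heads added, -2..-n = (-ret - 1) heads removed.
    if ret == 0:
        return "nothing changed"
    if ret == 1:
        return "head count unchanged"
    if ret > 1:
        return "%d head(s) added" % (ret - 1)
    return "%d head(s) removed" % (-ret - 1)

assert describe_result(3) == "2 head(s) added"
assert describe_result(-2) == "1 head(s) removed"
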
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

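stream_in() expects a simple line-oriented envelope: a numeric response code, a "<total_files> <total_bytes>" line, then for each file a "<name>\0<size>" line followed by exactly size raw bytes. A minimal sketch of a producer for that envelope, useful for exercising a consumer; make_stream and the file contents are invented for illustration:

from StringIO import StringIO          # Python 2 module

def make_stream(files):
    # files: list of (name, data) pairs -> file-like stream_out() payload
    out = StringIO()
    out.write('0\n')                                # response code: OK
    total = sum(len(data) for name, data in files)
    out.write('%d %d\n' % (len(files), total))      # "<files> <bytes>"
    for name, data in files:
        out.write('%s\0%d\n' % (name, len(data)))   # "<name>\0<size>"
        out.write(data)                             # raw file contents
    out.seek(0)
    return out

stream = make_stream([('data/foo.i', 'x' * 10)])
assert stream.readline() == '0\n'
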
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

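Because aftertrans() copies its argument into plain tuples, the returned callback keeps no reference to the transaction (or anything else that could form a cycle). A usage sketch, assuming it runs in this module's context so that aftertrans() and util.rename are available; the paths are invented:

import os, tempfile

d = tempfile.mkdtemp()
src = os.path.join(d, 'journal')
open(src, 'w').write('pending data')

undo = aftertrans([(src, os.path.join(d, 'undo'))])
undo()                              # performs the recorded rename(s)
assert os.path.exists(os.path.join(d, 'undo'))
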
def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True