repo: add internal support for sharing store directories...
Matt Mackall
r8799:87d1fd40 default
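This changeset teaches localrepository about two new on-disk pieces: a 'shared' entry in .hg/requires (added to the supported set below, so clients predating this change abort with "requirement 'shared' not supported" instead of misreading the repository), and a .hg/sharedpath file whose contents name the .hg directory whose store should be used in place of the local one. A minimal sketch of how a share could be assembled by hand on top of this internal support; the helper name and both paths are illustrative, and a real share should copy the source repository's requirements rather than hard-code them:

    import os

    def makeshare(srchg, dest):
        # srchg: an existing repository's .hg directory (illustrative path)
        # dest: root of the new shared working directory (illustrative path)
        desthg = os.path.join(dest, '.hg')
        os.makedirs(desthg)
        # 'shared' gates the format; the other entries are assumed here
        reqfile = open(os.path.join(desthg, 'requires'), 'w')
        for r in ['revlogv1', 'store', 'fncache', 'shared']:
            reqfile.write("%s\n" % r)
        reqfile.close()
        # localrepository.__init__ below reads this file, realpaths its
        # contents, and re-roots self.store there instead of under desthg
        fp = open(os.path.join(desthg, 'sharedpath'), 'w')
        fp.write(os.path.realpath(srchg))
        fp.close()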
--- a/mercurial/localrepo.py
+++ b/mercurial/localrepo.py
@@ -1,2149 +1,2160 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2, incorporated herein by reference.

 from node import bin, hex, nullid, nullrev, short
 from i18n import _
 import repo, changegroup
 import changelog, dirstate, filelog, manifest, context
 import lock, transaction, store, encoding
 import util, extensions, hook, error
 import match as match_
 import merge as merge_
 from lock import release
 import weakref, stat, errno, os, time, inspect
 propertycache = util.propertycache

 class localrepository(repo.repository):
     capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
-    supported = set('revlogv1 store fncache'.split())
+    supported = set('revlogv1 store fncache shared'.split())

     def __init__(self, baseui, path=None, create=0):
         repo.repository.__init__(self)
         self.root = os.path.realpath(path)
         self.path = os.path.join(self.root, ".hg")
         self.origroot = path
         self.opener = util.opener(self.path)
         self.wopener = util.opener(self.root)
         self.baseui = baseui
         self.ui = baseui.copy()

         try:
             self.ui.readconfig(self.join("hgrc"), self.root)
             extensions.loadall(self.ui)
         except IOError:
             pass

         if not os.path.isdir(self.path):
             if create:
                 if not os.path.exists(path):
                     os.mkdir(path)
                 os.mkdir(self.path)
                 requirements = ["revlogv1"]
                 if self.ui.configbool('format', 'usestore', True):
                     os.mkdir(os.path.join(self.path, "store"))
                     requirements.append("store")
                     if self.ui.configbool('format', 'usefncache', True):
                         requirements.append("fncache")
                 # create an invalid changelog
                 self.opener("00changelog.i", "a").write(
                     '\0\0\0\2' # represents revlogv2
                     ' dummy changelog to prevent using the old repo layout'
                 )
                 reqfile = self.opener("requires", "w")
                 for r in requirements:
                     reqfile.write("%s\n" % r)
                 reqfile.close()
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             # find requirements
             requirements = set()
             try:
                 requirements = set(self.opener("requires").read().splitlines())
             except IOError, inst:
                 if inst.errno != errno.ENOENT:
                     raise
             for r in requirements - self.supported:
                 raise error.RepoError(_("requirement '%s' not supported") % r)

-        self.store = store.store(requirements, self.path, util.opener)
+        self.sharedpath = self.path
+        try:
+            s = os.path.realpath(self.opener("sharedpath").read())
+            if not os.path.exists(s):
+                raise error.RepoError(
+                    _('.hg/sharedpath points to nonexistent directory %s' % s))
+            self.sharedpath = s
+        except IOError, inst:
+            if inst.errno != errno.ENOENT:
+                raise
+
+        self.store = store.store(requirements, self.sharedpath, util.opener)
76 self.spath = self.store.path
87 self.spath = self.store.path
77 self.sopener = self.store.opener
88 self.sopener = self.store.opener
78 self.sjoin = self.store.join
89 self.sjoin = self.store.join
79 self.opener.createmode = self.store.createmode
90 self.opener.createmode = self.store.createmode
80
91
81 self.tagscache = None
92 self.tagscache = None
82 self._tagstypecache = None
93 self._tagstypecache = None
83 self.branchcache = None
94 self.branchcache = None
84 self._ubranchcache = None # UTF-8 version of branchcache
95 self._ubranchcache = None # UTF-8 version of branchcache
85 self._branchcachetip = None
96 self._branchcachetip = None
86 self.nodetagscache = None
97 self.nodetagscache = None
87 self.filterpats = {}
98 self.filterpats = {}
88 self._datafilters = {}
99 self._datafilters = {}
89 self._transref = self._lockref = self._wlockref = None
100 self._transref = self._lockref = self._wlockref = None
90
101
91 @propertycache
102 @propertycache
92 def changelog(self):
103 def changelog(self):
93 c = changelog.changelog(self.sopener)
104 c = changelog.changelog(self.sopener)
94 if 'HG_PENDING' in os.environ:
105 if 'HG_PENDING' in os.environ:
95 p = os.environ['HG_PENDING']
106 p = os.environ['HG_PENDING']
96 if p.startswith(self.root):
107 if p.startswith(self.root):
97 c.readpending('00changelog.i.a')
108 c.readpending('00changelog.i.a')
98 self.sopener.defversion = c.version
109 self.sopener.defversion = c.version
99 return c
110 return c
100
111
101 @propertycache
112 @propertycache
102 def manifest(self):
113 def manifest(self):
103 return manifest.manifest(self.sopener)
114 return manifest.manifest(self.sopener)
104
115
105 @propertycache
116 @propertycache
106 def dirstate(self):
117 def dirstate(self):
107 return dirstate.dirstate(self.opener, self.ui, self.root)
118 return dirstate.dirstate(self.opener, self.ui, self.root)
108
119
109 def __getitem__(self, changeid):
120 def __getitem__(self, changeid):
110 if changeid is None:
121 if changeid is None:
111 return context.workingctx(self)
122 return context.workingctx(self)
112 return context.changectx(self, changeid)
123 return context.changectx(self, changeid)
113
124
114 def __nonzero__(self):
125 def __nonzero__(self):
115 return True
126 return True
116
127
117 def __len__(self):
128 def __len__(self):
118 return len(self.changelog)
129 return len(self.changelog)
119
130
120 def __iter__(self):
131 def __iter__(self):
121 for i in xrange(len(self)):
132 for i in xrange(len(self)):
122 yield i
133 yield i
123
134
124 def url(self):
135 def url(self):
125 return 'file:' + self.root
136 return 'file:' + self.root
126
137
127 def hook(self, name, throw=False, **args):
138 def hook(self, name, throw=False, **args):
128 return hook.hook(self.ui, self, name, throw, **args)
139 return hook.hook(self.ui, self, name, throw, **args)
129
140
130 tag_disallowed = ':\r\n'
141 tag_disallowed = ':\r\n'
131
142
132 def _tag(self, names, node, message, local, user, date, extra={}):
143 def _tag(self, names, node, message, local, user, date, extra={}):
133 if isinstance(names, str):
144 if isinstance(names, str):
134 allchars = names
145 allchars = names
135 names = (names,)
146 names = (names,)
136 else:
147 else:
137 allchars = ''.join(names)
148 allchars = ''.join(names)
138 for c in self.tag_disallowed:
149 for c in self.tag_disallowed:
139 if c in allchars:
150 if c in allchars:
140 raise util.Abort(_('%r cannot be used in a tag name') % c)
151 raise util.Abort(_('%r cannot be used in a tag name') % c)
141
152
142 for name in names:
153 for name in names:
143 self.hook('pretag', throw=True, node=hex(node), tag=name,
154 self.hook('pretag', throw=True, node=hex(node), tag=name,
144 local=local)
155 local=local)
145
156
146 def writetags(fp, names, munge, prevtags):
157 def writetags(fp, names, munge, prevtags):
147 fp.seek(0, 2)
158 fp.seek(0, 2)
148 if prevtags and prevtags[-1] != '\n':
159 if prevtags and prevtags[-1] != '\n':
149 fp.write('\n')
160 fp.write('\n')
150 for name in names:
161 for name in names:
151 m = munge and munge(name) or name
162 m = munge and munge(name) or name
152 if self._tagstypecache and name in self._tagstypecache:
163 if self._tagstypecache and name in self._tagstypecache:
153 old = self.tagscache.get(name, nullid)
164 old = self.tagscache.get(name, nullid)
154 fp.write('%s %s\n' % (hex(old), m))
165 fp.write('%s %s\n' % (hex(old), m))
155 fp.write('%s %s\n' % (hex(node), m))
166 fp.write('%s %s\n' % (hex(node), m))
156 fp.close()
167 fp.close()
157
168
158 prevtags = ''
169 prevtags = ''
159 if local:
170 if local:
160 try:
171 try:
161 fp = self.opener('localtags', 'r+')
172 fp = self.opener('localtags', 'r+')
162 except IOError:
173 except IOError:
163 fp = self.opener('localtags', 'a')
174 fp = self.opener('localtags', 'a')
164 else:
175 else:
165 prevtags = fp.read()
176 prevtags = fp.read()
166
177
167 # local tags are stored in the current charset
178 # local tags are stored in the current charset
168 writetags(fp, names, None, prevtags)
179 writetags(fp, names, None, prevtags)
169 for name in names:
180 for name in names:
170 self.hook('tag', node=hex(node), tag=name, local=local)
181 self.hook('tag', node=hex(node), tag=name, local=local)
171 return
182 return
172
183
173 try:
184 try:
174 fp = self.wfile('.hgtags', 'rb+')
185 fp = self.wfile('.hgtags', 'rb+')
175 except IOError:
186 except IOError:
176 fp = self.wfile('.hgtags', 'ab')
187 fp = self.wfile('.hgtags', 'ab')
177 else:
188 else:
178 prevtags = fp.read()
189 prevtags = fp.read()
179
190
180 # committed tags are stored in UTF-8
191 # committed tags are stored in UTF-8
181 writetags(fp, names, encoding.fromlocal, prevtags)
192 writetags(fp, names, encoding.fromlocal, prevtags)
182
193
183 if '.hgtags' not in self.dirstate:
194 if '.hgtags' not in self.dirstate:
184 self.add(['.hgtags'])
195 self.add(['.hgtags'])
185
196
186 m = match_.exact(self.root, '', ['.hgtags'])
197 m = match_.exact(self.root, '', ['.hgtags'])
187 tagnode = self.commit(message, user, date, extra=extra, match=m)
198 tagnode = self.commit(message, user, date, extra=extra, match=m)
188
199
189 for name in names:
200 for name in names:
190 self.hook('tag', node=hex(node), tag=name, local=local)
201 self.hook('tag', node=hex(node), tag=name, local=local)
191
202
192 return tagnode
203 return tagnode
193
204
194 def tag(self, names, node, message, local, user, date):
205 def tag(self, names, node, message, local, user, date):
195 '''tag a revision with one or more symbolic names.
206 '''tag a revision with one or more symbolic names.
196
207
197 names is a list of strings or, when adding a single tag, names may be a
208 names is a list of strings or, when adding a single tag, names may be a
198 string.
209 string.
199
210
200 if local is True, the tags are stored in a per-repository file.
211 if local is True, the tags are stored in a per-repository file.
201 otherwise, they are stored in the .hgtags file, and a new
212 otherwise, they are stored in the .hgtags file, and a new
202 changeset is committed with the change.
213 changeset is committed with the change.
203
214
204 keyword arguments:
215 keyword arguments:
205
216
206 local: whether to store tags in non-version-controlled file
217 local: whether to store tags in non-version-controlled file
207 (default False)
218 (default False)
208
219
209 message: commit message to use if committing
220 message: commit message to use if committing
210
221
211 user: name of user to use if committing
222 user: name of user to use if committing
212
223
213 date: date tuple to use if committing'''
224 date: date tuple to use if committing'''
214
225
215 for x in self.status()[:5]:
226 for x in self.status()[:5]:
216 if '.hgtags' in x:
227 if '.hgtags' in x:
217 raise util.Abort(_('working copy of .hgtags is changed '
228 raise util.Abort(_('working copy of .hgtags is changed '
218 '(please commit .hgtags manually)'))
229 '(please commit .hgtags manually)'))
219
230
220 self.tags() # instantiate the cache
231 self.tags() # instantiate the cache
221 self._tag(names, node, message, local, user, date)
232 self._tag(names, node, message, local, user, date)
222
233
223 def tags(self):
234 def tags(self):
224 '''return a mapping of tag to node'''
235 '''return a mapping of tag to node'''
225 if self.tagscache:
236 if self.tagscache:
226 return self.tagscache
237 return self.tagscache
227
238
228 globaltags = {}
239 globaltags = {}
229 tagtypes = {}
240 tagtypes = {}
230
241
231 def readtags(lines, fn, tagtype):
242 def readtags(lines, fn, tagtype):
232 filetags = {}
243 filetags = {}
233 count = 0
244 count = 0
234
245
235 def warn(msg):
246 def warn(msg):
236 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
247 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
237
248
238 for l in lines:
249 for l in lines:
239 count += 1
250 count += 1
240 if not l:
251 if not l:
241 continue
252 continue
242 s = l.split(" ", 1)
253 s = l.split(" ", 1)
243 if len(s) != 2:
254 if len(s) != 2:
244 warn(_("cannot parse entry"))
255 warn(_("cannot parse entry"))
245 continue
256 continue
246 node, key = s
257 node, key = s
247 key = encoding.tolocal(key.strip()) # stored in UTF-8
258 key = encoding.tolocal(key.strip()) # stored in UTF-8
248 try:
259 try:
249 bin_n = bin(node)
260 bin_n = bin(node)
250 except TypeError:
261 except TypeError:
251 warn(_("node '%s' is not well formed") % node)
262 warn(_("node '%s' is not well formed") % node)
252 continue
263 continue
253 if bin_n not in self.changelog.nodemap:
264 if bin_n not in self.changelog.nodemap:
254 warn(_("tag '%s' refers to unknown node") % key)
265 warn(_("tag '%s' refers to unknown node") % key)
255 continue
266 continue
256
267
257 h = []
268 h = []
258 if key in filetags:
269 if key in filetags:
259 n, h = filetags[key]
270 n, h = filetags[key]
260 h.append(n)
271 h.append(n)
261 filetags[key] = (bin_n, h)
272 filetags[key] = (bin_n, h)
262
273
263 for k, nh in filetags.iteritems():
274 for k, nh in filetags.iteritems():
264 if k not in globaltags:
275 if k not in globaltags:
265 globaltags[k] = nh
276 globaltags[k] = nh
266 tagtypes[k] = tagtype
277 tagtypes[k] = tagtype
267 continue
278 continue
268
279
269 # we prefer the global tag if:
280 # we prefer the global tag if:
270 # it supercedes us OR
281 # it supercedes us OR
271 # mutual supercedes and it has a higher rank
282 # mutual supercedes and it has a higher rank
272 # otherwise we win because we're tip-most
283 # otherwise we win because we're tip-most
273 an, ah = nh
284 an, ah = nh
274 bn, bh = globaltags[k]
285 bn, bh = globaltags[k]
275 if (bn != an and an in bh and
286 if (bn != an and an in bh and
276 (bn not in ah or len(bh) > len(ah))):
287 (bn not in ah or len(bh) > len(ah))):
277 an = bn
288 an = bn
278 ah.extend([n for n in bh if n not in ah])
289 ah.extend([n for n in bh if n not in ah])
279 globaltags[k] = an, ah
290 globaltags[k] = an, ah
280 tagtypes[k] = tagtype
291 tagtypes[k] = tagtype
281
292
282 # read the tags file from each head, ending with the tip
293 # read the tags file from each head, ending with the tip
283 f = None
294 f = None
284 for rev, node, fnode in self._hgtagsnodes():
295 for rev, node, fnode in self._hgtagsnodes():
285 f = (f and f.filectx(fnode) or
296 f = (f and f.filectx(fnode) or
286 self.filectx('.hgtags', fileid=fnode))
297 self.filectx('.hgtags', fileid=fnode))
287 readtags(f.data().splitlines(), f, "global")
298 readtags(f.data().splitlines(), f, "global")
288
299
289 try:
300 try:
290 data = encoding.fromlocal(self.opener("localtags").read())
301 data = encoding.fromlocal(self.opener("localtags").read())
291 # localtags are stored in the local character set
302 # localtags are stored in the local character set
292 # while the internal tag table is stored in UTF-8
303 # while the internal tag table is stored in UTF-8
293 readtags(data.splitlines(), "localtags", "local")
304 readtags(data.splitlines(), "localtags", "local")
294 except IOError:
305 except IOError:
295 pass
306 pass
296
307
297 self.tagscache = {}
308 self.tagscache = {}
298 self._tagstypecache = {}
309 self._tagstypecache = {}
299 for k, nh in globaltags.iteritems():
310 for k, nh in globaltags.iteritems():
300 n = nh[0]
311 n = nh[0]
301 if n != nullid:
312 if n != nullid:
302 self.tagscache[k] = n
313 self.tagscache[k] = n
303 self._tagstypecache[k] = tagtypes[k]
314 self._tagstypecache[k] = tagtypes[k]
304 self.tagscache['tip'] = self.changelog.tip()
315 self.tagscache['tip'] = self.changelog.tip()
305 return self.tagscache
316 return self.tagscache
306
317
307 def tagtype(self, tagname):
318 def tagtype(self, tagname):
308 '''
319 '''
309 return the type of the given tag. result can be:
320 return the type of the given tag. result can be:
310
321
311 'local' : a local tag
322 'local' : a local tag
312 'global' : a global tag
323 'global' : a global tag
313 None : tag does not exist
324 None : tag does not exist
314 '''
325 '''
315
326
316 self.tags()
327 self.tags()
317
328
318 return self._tagstypecache.get(tagname)
329 return self._tagstypecache.get(tagname)
319
330
320 def _hgtagsnodes(self):
331 def _hgtagsnodes(self):
321 last = {}
332 last = {}
322 ret = []
333 ret = []
323 for node in reversed(self.heads()):
334 for node in reversed(self.heads()):
324 c = self[node]
335 c = self[node]
325 rev = c.rev()
336 rev = c.rev()
326 try:
337 try:
327 fnode = c.filenode('.hgtags')
338 fnode = c.filenode('.hgtags')
328 except error.LookupError:
339 except error.LookupError:
329 continue
340 continue
330 ret.append((rev, node, fnode))
341 ret.append((rev, node, fnode))
331 if fnode in last:
342 if fnode in last:
332 ret[last[fnode]] = None
343 ret[last[fnode]] = None
333 last[fnode] = len(ret) - 1
344 last[fnode] = len(ret) - 1
334 return [item for item in ret if item]
345 return [item for item in ret if item]
335
346
336 def tagslist(self):
347 def tagslist(self):
337 '''return a list of tags ordered by revision'''
348 '''return a list of tags ordered by revision'''
338 l = []
349 l = []
339 for t, n in self.tags().iteritems():
350 for t, n in self.tags().iteritems():
340 try:
351 try:
341 r = self.changelog.rev(n)
352 r = self.changelog.rev(n)
342 except:
353 except:
343 r = -2 # sort to the beginning of the list if unknown
354 r = -2 # sort to the beginning of the list if unknown
344 l.append((r, t, n))
355 l.append((r, t, n))
345 return [(t, n) for r, t, n in sorted(l)]
356 return [(t, n) for r, t, n in sorted(l)]
346
357
347 def nodetags(self, node):
358 def nodetags(self, node):
348 '''return the tags associated with a node'''
359 '''return the tags associated with a node'''
349 if not self.nodetagscache:
360 if not self.nodetagscache:
350 self.nodetagscache = {}
361 self.nodetagscache = {}
351 for t, n in self.tags().iteritems():
362 for t, n in self.tags().iteritems():
352 self.nodetagscache.setdefault(n, []).append(t)
363 self.nodetagscache.setdefault(n, []).append(t)
353 return self.nodetagscache.get(node, [])
364 return self.nodetagscache.get(node, [])
354
365
355 def _branchtags(self, partial, lrev):
366 def _branchtags(self, partial, lrev):
356 # TODO: rename this function?
367 # TODO: rename this function?
357 tiprev = len(self) - 1
368 tiprev = len(self) - 1
358 if lrev != tiprev:
369 if lrev != tiprev:
359 self._updatebranchcache(partial, lrev+1, tiprev+1)
370 self._updatebranchcache(partial, lrev+1, tiprev+1)
360 self._writebranchcache(partial, self.changelog.tip(), tiprev)
371 self._writebranchcache(partial, self.changelog.tip(), tiprev)
361
372
362 return partial
373 return partial
363
374
364 def branchmap(self):
375 def branchmap(self):
365 tip = self.changelog.tip()
376 tip = self.changelog.tip()
366 if self.branchcache is not None and self._branchcachetip == tip:
377 if self.branchcache is not None and self._branchcachetip == tip:
367 return self.branchcache
378 return self.branchcache
368
379
369 oldtip = self._branchcachetip
380 oldtip = self._branchcachetip
370 self._branchcachetip = tip
381 self._branchcachetip = tip
371 if self.branchcache is None:
382 if self.branchcache is None:
372 self.branchcache = {} # avoid recursion in changectx
383 self.branchcache = {} # avoid recursion in changectx
373 else:
384 else:
374 self.branchcache.clear() # keep using the same dict
385 self.branchcache.clear() # keep using the same dict
375 if oldtip is None or oldtip not in self.changelog.nodemap:
386 if oldtip is None or oldtip not in self.changelog.nodemap:
376 partial, last, lrev = self._readbranchcache()
387 partial, last, lrev = self._readbranchcache()
377 else:
388 else:
378 lrev = self.changelog.rev(oldtip)
389 lrev = self.changelog.rev(oldtip)
379 partial = self._ubranchcache
390 partial = self._ubranchcache
380
391
381 self._branchtags(partial, lrev)
392 self._branchtags(partial, lrev)
382 # this private cache holds all heads (not just tips)
393 # this private cache holds all heads (not just tips)
383 self._ubranchcache = partial
394 self._ubranchcache = partial
384
395
385 # the branch cache is stored on disk as UTF-8, but in the local
396 # the branch cache is stored on disk as UTF-8, but in the local
386 # charset internally
397 # charset internally
387 for k, v in partial.iteritems():
398 for k, v in partial.iteritems():
388 self.branchcache[encoding.tolocal(k)] = v
399 self.branchcache[encoding.tolocal(k)] = v
389 return self.branchcache
400 return self.branchcache
390
401
391
402
392 def branchtags(self):
403 def branchtags(self):
393 '''return a dict where branch names map to the tipmost head of
404 '''return a dict where branch names map to the tipmost head of
394 the branch, open heads come before closed'''
405 the branch, open heads come before closed'''
395 bt = {}
406 bt = {}
396 for bn, heads in self.branchmap().iteritems():
407 for bn, heads in self.branchmap().iteritems():
397 head = None
408 head = None
398 for i in range(len(heads)-1, -1, -1):
409 for i in range(len(heads)-1, -1, -1):
399 h = heads[i]
410 h = heads[i]
400 if 'close' not in self.changelog.read(h)[5]:
411 if 'close' not in self.changelog.read(h)[5]:
401 head = h
412 head = h
402 break
413 break
403 # no open heads were found
414 # no open heads were found
404 if head is None:
415 if head is None:
405 head = heads[-1]
416 head = heads[-1]
406 bt[bn] = head
417 bt[bn] = head
407 return bt
418 return bt
408
419
409
420
410 def _readbranchcache(self):
421 def _readbranchcache(self):
411 partial = {}
422 partial = {}
412 try:
423 try:
413 f = self.opener("branchheads.cache")
424 f = self.opener("branchheads.cache")
414 lines = f.read().split('\n')
425 lines = f.read().split('\n')
415 f.close()
426 f.close()
416 except (IOError, OSError):
427 except (IOError, OSError):
417 return {}, nullid, nullrev
428 return {}, nullid, nullrev
418
429
419 try:
430 try:
420 last, lrev = lines.pop(0).split(" ", 1)
431 last, lrev = lines.pop(0).split(" ", 1)
421 last, lrev = bin(last), int(lrev)
432 last, lrev = bin(last), int(lrev)
422 if lrev >= len(self) or self[lrev].node() != last:
433 if lrev >= len(self) or self[lrev].node() != last:
423 # invalidate the cache
434 # invalidate the cache
424 raise ValueError('invalidating branch cache (tip differs)')
435 raise ValueError('invalidating branch cache (tip differs)')
425 for l in lines:
436 for l in lines:
426 if not l: continue
437 if not l: continue
427 node, label = l.split(" ", 1)
438 node, label = l.split(" ", 1)
428 partial.setdefault(label.strip(), []).append(bin(node))
439 partial.setdefault(label.strip(), []).append(bin(node))
429 except KeyboardInterrupt:
440 except KeyboardInterrupt:
430 raise
441 raise
431 except Exception, inst:
442 except Exception, inst:
432 if self.ui.debugflag:
443 if self.ui.debugflag:
433 self.ui.warn(str(inst), '\n')
444 self.ui.warn(str(inst), '\n')
434 partial, last, lrev = {}, nullid, nullrev
445 partial, last, lrev = {}, nullid, nullrev
435 return partial, last, lrev
446 return partial, last, lrev
436
447
437 def _writebranchcache(self, branches, tip, tiprev):
448 def _writebranchcache(self, branches, tip, tiprev):
438 try:
449 try:
439 f = self.opener("branchheads.cache", "w", atomictemp=True)
450 f = self.opener("branchheads.cache", "w", atomictemp=True)
440 f.write("%s %s\n" % (hex(tip), tiprev))
451 f.write("%s %s\n" % (hex(tip), tiprev))
441 for label, nodes in branches.iteritems():
452 for label, nodes in branches.iteritems():
442 for node in nodes:
453 for node in nodes:
443 f.write("%s %s\n" % (hex(node), label))
454 f.write("%s %s\n" % (hex(node), label))
444 f.rename()
455 f.rename()
445 except (IOError, OSError):
456 except (IOError, OSError):
446 pass
457 pass
447
458
448 def _updatebranchcache(self, partial, start, end):
459 def _updatebranchcache(self, partial, start, end):
449 for r in xrange(start, end):
460 for r in xrange(start, end):
450 c = self[r]
461 c = self[r]
451 b = c.branch()
462 b = c.branch()
452 bheads = partial.setdefault(b, [])
463 bheads = partial.setdefault(b, [])
453 bheads.append(c.node())
464 bheads.append(c.node())
454 for p in c.parents():
465 for p in c.parents():
455 pn = p.node()
466 pn = p.node()
456 if pn in bheads:
467 if pn in bheads:
457 bheads.remove(pn)
468 bheads.remove(pn)
458
469
459 def lookup(self, key):
470 def lookup(self, key):
460 if isinstance(key, int):
471 if isinstance(key, int):
461 return self.changelog.node(key)
472 return self.changelog.node(key)
462 elif key == '.':
473 elif key == '.':
463 return self.dirstate.parents()[0]
474 return self.dirstate.parents()[0]
464 elif key == 'null':
475 elif key == 'null':
465 return nullid
476 return nullid
466 elif key == 'tip':
477 elif key == 'tip':
467 return self.changelog.tip()
478 return self.changelog.tip()
468 n = self.changelog._match(key)
479 n = self.changelog._match(key)
469 if n:
480 if n:
470 return n
481 return n
471 if key in self.tags():
482 if key in self.tags():
472 return self.tags()[key]
483 return self.tags()[key]
473 if key in self.branchtags():
484 if key in self.branchtags():
474 return self.branchtags()[key]
485 return self.branchtags()[key]
475 n = self.changelog._partialmatch(key)
486 n = self.changelog._partialmatch(key)
476 if n:
487 if n:
477 return n
488 return n
478
489
479 # can't find key, check if it might have come from damaged dirstate
490 # can't find key, check if it might have come from damaged dirstate
480 if key in self.dirstate.parents():
491 if key in self.dirstate.parents():
481 raise error.Abort(_("working directory has unknown parent '%s'!")
492 raise error.Abort(_("working directory has unknown parent '%s'!")
482 % short(key))
493 % short(key))
483 try:
494 try:
484 if len(key) == 20:
495 if len(key) == 20:
485 key = hex(key)
496 key = hex(key)
486 except:
497 except:
487 pass
498 pass
488 raise error.RepoError(_("unknown revision '%s'") % key)
499 raise error.RepoError(_("unknown revision '%s'") % key)
489
500
490 def local(self):
501 def local(self):
491 return True
502 return True
492
503
493 def join(self, f):
504 def join(self, f):
494 return os.path.join(self.path, f)
505 return os.path.join(self.path, f)
495
506
496 def wjoin(self, f):
507 def wjoin(self, f):
497 return os.path.join(self.root, f)
508 return os.path.join(self.root, f)
498
509
499 def rjoin(self, f):
510 def rjoin(self, f):
500 return os.path.join(self.root, util.pconvert(f))
511 return os.path.join(self.root, util.pconvert(f))
501
512
502 def file(self, f):
513 def file(self, f):
503 if f[0] == '/':
514 if f[0] == '/':
504 f = f[1:]
515 f = f[1:]
505 return filelog.filelog(self.sopener, f)
516 return filelog.filelog(self.sopener, f)
506
517
507 def changectx(self, changeid):
518 def changectx(self, changeid):
508 return self[changeid]
519 return self[changeid]
509
520
510 def parents(self, changeid=None):
521 def parents(self, changeid=None):
511 '''get list of changectxs for parents of changeid'''
522 '''get list of changectxs for parents of changeid'''
512 return self[changeid].parents()
523 return self[changeid].parents()
513
524
514 def filectx(self, path, changeid=None, fileid=None):
525 def filectx(self, path, changeid=None, fileid=None):
515 """changeid can be a changeset revision, node, or tag.
526 """changeid can be a changeset revision, node, or tag.
516 fileid can be a file revision or node."""
527 fileid can be a file revision or node."""
517 return context.filectx(self, path, changeid, fileid)
528 return context.filectx(self, path, changeid, fileid)
518
529
519 def getcwd(self):
530 def getcwd(self):
520 return self.dirstate.getcwd()
531 return self.dirstate.getcwd()
521
532
522 def pathto(self, f, cwd=None):
533 def pathto(self, f, cwd=None):
523 return self.dirstate.pathto(f, cwd)
534 return self.dirstate.pathto(f, cwd)
524
535
525 def wfile(self, f, mode='r'):
536 def wfile(self, f, mode='r'):
526 return self.wopener(f, mode)
537 return self.wopener(f, mode)
527
538
528 def _link(self, f):
539 def _link(self, f):
529 return os.path.islink(self.wjoin(f))
540 return os.path.islink(self.wjoin(f))
530
541
531 def _filter(self, filter, filename, data):
542 def _filter(self, filter, filename, data):
532 if filter not in self.filterpats:
543 if filter not in self.filterpats:
533 l = []
544 l = []
534 for pat, cmd in self.ui.configitems(filter):
545 for pat, cmd in self.ui.configitems(filter):
535 if cmd == '!':
546 if cmd == '!':
536 continue
547 continue
537 mf = match_.match(self.root, '', [pat])
548 mf = match_.match(self.root, '', [pat])
538 fn = None
549 fn = None
539 params = cmd
550 params = cmd
540 for name, filterfn in self._datafilters.iteritems():
551 for name, filterfn in self._datafilters.iteritems():
541 if cmd.startswith(name):
552 if cmd.startswith(name):
542 fn = filterfn
553 fn = filterfn
543 params = cmd[len(name):].lstrip()
554 params = cmd[len(name):].lstrip()
544 break
555 break
545 if not fn:
556 if not fn:
546 fn = lambda s, c, **kwargs: util.filter(s, c)
557 fn = lambda s, c, **kwargs: util.filter(s, c)
547 # Wrap old filters not supporting keyword arguments
558 # Wrap old filters not supporting keyword arguments
548 if not inspect.getargspec(fn)[2]:
559 if not inspect.getargspec(fn)[2]:
549 oldfn = fn
560 oldfn = fn
550 fn = lambda s, c, **kwargs: oldfn(s, c)
561 fn = lambda s, c, **kwargs: oldfn(s, c)
551 l.append((mf, fn, params))
562 l.append((mf, fn, params))
552 self.filterpats[filter] = l
563 self.filterpats[filter] = l
553
564
554 for mf, fn, cmd in self.filterpats[filter]:
565 for mf, fn, cmd in self.filterpats[filter]:
555 if mf(filename):
566 if mf(filename):
556 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
567 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
557 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
568 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
558 break
569 break
559
570
560 return data
571 return data
561
572
562 def adddatafilter(self, name, filter):
573 def adddatafilter(self, name, filter):
563 self._datafilters[name] = filter
574 self._datafilters[name] = filter
564
575
565 def wread(self, filename):
576 def wread(self, filename):
566 if self._link(filename):
577 if self._link(filename):
567 data = os.readlink(self.wjoin(filename))
578 data = os.readlink(self.wjoin(filename))
568 else:
579 else:
569 data = self.wopener(filename, 'r').read()
580 data = self.wopener(filename, 'r').read()
570 return self._filter("encode", filename, data)
581 return self._filter("encode", filename, data)
571
582
572 def wwrite(self, filename, data, flags):
583 def wwrite(self, filename, data, flags):
573 data = self._filter("decode", filename, data)
584 data = self._filter("decode", filename, data)
574 try:
585 try:
575 os.unlink(self.wjoin(filename))
586 os.unlink(self.wjoin(filename))
576 except OSError:
587 except OSError:
577 pass
588 pass
578 if 'l' in flags:
589 if 'l' in flags:
579 self.wopener.symlink(data, filename)
590 self.wopener.symlink(data, filename)
580 else:
591 else:
581 self.wopener(filename, 'w').write(data)
592 self.wopener(filename, 'w').write(data)
582 if 'x' in flags:
593 if 'x' in flags:
583 util.set_flags(self.wjoin(filename), False, True)
594 util.set_flags(self.wjoin(filename), False, True)
584
595
585 def wwritedata(self, filename, data):
596 def wwritedata(self, filename, data):
586 return self._filter("decode", filename, data)
597 return self._filter("decode", filename, data)
587
598
588 def transaction(self):
599 def transaction(self):
589 tr = self._transref and self._transref() or None
600 tr = self._transref and self._transref() or None
590 if tr and tr.running():
601 if tr and tr.running():
591 return tr.nest()
602 return tr.nest()
592
603
593 # abort here if the journal already exists
604 # abort here if the journal already exists
594 if os.path.exists(self.sjoin("journal")):
605 if os.path.exists(self.sjoin("journal")):
595 raise error.RepoError(_("journal already exists - run hg recover"))
606 raise error.RepoError(_("journal already exists - run hg recover"))
596
607
597 # save dirstate for rollback
608 # save dirstate for rollback
598 try:
609 try:
599 ds = self.opener("dirstate").read()
610 ds = self.opener("dirstate").read()
600 except IOError:
611 except IOError:
601 ds = ""
612 ds = ""
602 self.opener("journal.dirstate", "w").write(ds)
613 self.opener("journal.dirstate", "w").write(ds)
603 self.opener("journal.branch", "w").write(self.dirstate.branch())
614 self.opener("journal.branch", "w").write(self.dirstate.branch())
604
615
605 renames = [(self.sjoin("journal"), self.sjoin("undo")),
616 renames = [(self.sjoin("journal"), self.sjoin("undo")),
606 (self.join("journal.dirstate"), self.join("undo.dirstate")),
617 (self.join("journal.dirstate"), self.join("undo.dirstate")),
607 (self.join("journal.branch"), self.join("undo.branch"))]
618 (self.join("journal.branch"), self.join("undo.branch"))]
608 tr = transaction.transaction(self.ui.warn, self.sopener,
619 tr = transaction.transaction(self.ui.warn, self.sopener,
609 self.sjoin("journal"),
620 self.sjoin("journal"),
610 aftertrans(renames),
621 aftertrans(renames),
611 self.store.createmode)
622 self.store.createmode)
612 self._transref = weakref.ref(tr)
623 self._transref = weakref.ref(tr)
613 return tr
624 return tr
614
625
615 def recover(self):
626 def recover(self):
616 lock = self.lock()
627 lock = self.lock()
617 try:
628 try:
618 if os.path.exists(self.sjoin("journal")):
629 if os.path.exists(self.sjoin("journal")):
619 self.ui.status(_("rolling back interrupted transaction\n"))
630 self.ui.status(_("rolling back interrupted transaction\n"))
620 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
631 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
621 self.invalidate()
632 self.invalidate()
622 return True
633 return True
623 else:
634 else:
624 self.ui.warn(_("no interrupted transaction available\n"))
635 self.ui.warn(_("no interrupted transaction available\n"))
625 return False
636 return False
626 finally:
637 finally:
627 lock.release()
638 lock.release()
628
639
629 def rollback(self):
640 def rollback(self):
630 wlock = lock = None
641 wlock = lock = None
631 try:
642 try:
632 wlock = self.wlock()
643 wlock = self.wlock()
633 lock = self.lock()
644 lock = self.lock()
634 if os.path.exists(self.sjoin("undo")):
645 if os.path.exists(self.sjoin("undo")):
635 self.ui.status(_("rolling back last transaction\n"))
646 self.ui.status(_("rolling back last transaction\n"))
636 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
647 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
637 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
648 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
638 try:
649 try:
639 branch = self.opener("undo.branch").read()
650 branch = self.opener("undo.branch").read()
640 self.dirstate.setbranch(branch)
651 self.dirstate.setbranch(branch)
641 except IOError:
652 except IOError:
642 self.ui.warn(_("Named branch could not be reset, "
653 self.ui.warn(_("Named branch could not be reset, "
643 "current branch still is: %s\n")
654 "current branch still is: %s\n")
644 % encoding.tolocal(self.dirstate.branch()))
655 % encoding.tolocal(self.dirstate.branch()))
645 self.invalidate()
656 self.invalidate()
646 self.dirstate.invalidate()
657 self.dirstate.invalidate()
647 else:
658 else:
648 self.ui.warn(_("no rollback information available\n"))
659 self.ui.warn(_("no rollback information available\n"))
649 finally:
660 finally:
650 release(lock, wlock)
661 release(lock, wlock)
651
662
652 def invalidate(self):
663 def invalidate(self):
653 for a in "changelog manifest".split():
664 for a in "changelog manifest".split():
654 if a in self.__dict__:
665 if a in self.__dict__:
655 delattr(self, a)
666 delattr(self, a)
656 self.tagscache = None
667 self.tagscache = None
657 self._tagstypecache = None
668 self._tagstypecache = None
658 self.nodetagscache = None
669 self.nodetagscache = None
659 self.branchcache = None
670 self.branchcache = None
660 self._ubranchcache = None
671 self._ubranchcache = None
661 self._branchcachetip = None
672 self._branchcachetip = None
662
673
663 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
674 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
664 try:
675 try:
665 l = lock.lock(lockname, 0, releasefn, desc=desc)
676 l = lock.lock(lockname, 0, releasefn, desc=desc)
666 except error.LockHeld, inst:
677 except error.LockHeld, inst:
667 if not wait:
678 if not wait:
668 raise
679 raise
669 self.ui.warn(_("waiting for lock on %s held by %r\n") %
680 self.ui.warn(_("waiting for lock on %s held by %r\n") %
670 (desc, inst.locker))
681 (desc, inst.locker))
671 # default to 600 seconds timeout
682 # default to 600 seconds timeout
672 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
683 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
673 releasefn, desc=desc)
684 releasefn, desc=desc)
674 if acquirefn:
685 if acquirefn:
675 acquirefn()
686 acquirefn()
676 return l
687 return l
677
688
678 def lock(self, wait=True):
689 def lock(self, wait=True):
679 l = self._lockref and self._lockref()
690 l = self._lockref and self._lockref()
680 if l is not None and l.held:
691 if l is not None and l.held:
681 l.lock()
692 l.lock()
682 return l
693 return l
683
694
684 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
695 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
685 _('repository %s') % self.origroot)
696 _('repository %s') % self.origroot)
686 self._lockref = weakref.ref(l)
697 self._lockref = weakref.ref(l)
687 return l
698 return l
688
699
689 def wlock(self, wait=True):
700 def wlock(self, wait=True):
690 l = self._wlockref and self._wlockref()
701 l = self._wlockref and self._wlockref()
691 if l is not None and l.held:
702 if l is not None and l.held:
692 l.lock()
703 l.lock()
693 return l
704 return l
694
705
695 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
706 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
696 self.dirstate.invalidate, _('working directory of %s') %
707 self.dirstate.invalidate, _('working directory of %s') %
697 self.origroot)
708 self.origroot)
698 self._wlockref = weakref.ref(l)
709 self._wlockref = weakref.ref(l)
699 return l
710 return l
700
711
701 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
712 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
702 """
713 """
703 commit an individual file as part of a larger transaction
714 commit an individual file as part of a larger transaction
704 """
715 """
705
716
706 fname = fctx.path()
717 fname = fctx.path()
707 text = fctx.data()
718 text = fctx.data()
708 flog = self.file(fname)
719 flog = self.file(fname)
709 fparent1 = manifest1.get(fname, nullid)
720 fparent1 = manifest1.get(fname, nullid)
710 fparent2 = fparent2o = manifest2.get(fname, nullid)
721 fparent2 = fparent2o = manifest2.get(fname, nullid)
711
722
712 meta = {}
723 meta = {}
713 copy = fctx.renamed()
724 copy = fctx.renamed()
714 if copy and copy[0] != fname:
725 if copy and copy[0] != fname:
715 # Mark the new revision of this file as a copy of another
726 # Mark the new revision of this file as a copy of another
716 # file. This copy data will effectively act as a parent
727 # file. This copy data will effectively act as a parent
717 # of this new revision. If this is a merge, the first
728 # of this new revision. If this is a merge, the first
718 # parent will be the nullid (meaning "look up the copy data")
729 # parent will be the nullid (meaning "look up the copy data")
719 # and the second one will be the other parent. For example:
730 # and the second one will be the other parent. For example:
720 #
731 #
721 # 0 --- 1 --- 3 rev1 changes file foo
732 # 0 --- 1 --- 3 rev1 changes file foo
722 # \ / rev2 renames foo to bar and changes it
733 # \ / rev2 renames foo to bar and changes it
723 # \- 2 -/ rev3 should have bar with all changes and
734 # \- 2 -/ rev3 should have bar with all changes and
724 # should record that bar descends from
735 # should record that bar descends from
725 # bar in rev2 and foo in rev1
736 # bar in rev2 and foo in rev1
726 #
737 #
727 # this allows this merge to succeed:
738 # this allows this merge to succeed:
728 #
739 #
729 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
740 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
730 # \ / merging rev3 and rev4 should use bar@rev2
741 # \ / merging rev3 and rev4 should use bar@rev2
731 # \- 2 --- 4 as the merge base
742 # \- 2 --- 4 as the merge base
732 #
743 #
733
744
734 cfname = copy[0]
745 cfname = copy[0]
735 crev = manifest1.get(cfname)
746 crev = manifest1.get(cfname)
736 newfparent = fparent2
747 newfparent = fparent2
737
748
738 if manifest2: # branch merge
749 if manifest2: # branch merge
739 if fparent2 == nullid or crev is None: # copied on remote side
750 if fparent2 == nullid or crev is None: # copied on remote side
740 if cfname in manifest2:
751 if cfname in manifest2:
741 crev = manifest2[cfname]
752 crev = manifest2[cfname]
742 newfparent = fparent1
753 newfparent = fparent1
743
754
744 # find source in nearest ancestor if we've lost track
755 # find source in nearest ancestor if we've lost track
745 if not crev:
756 if not crev:
746 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
757 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
747 (fname, cfname))
758 (fname, cfname))
748 for ancestor in self['.'].ancestors():
759 for ancestor in self['.'].ancestors():
749 if cfname in ancestor:
760 if cfname in ancestor:
750 crev = ancestor[cfname].filenode()
761 crev = ancestor[cfname].filenode()
751 break
762 break
752
763
753 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
764 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
754 meta["copy"] = cfname
765 meta["copy"] = cfname
755 meta["copyrev"] = hex(crev)
766 meta["copyrev"] = hex(crev)
756 fparent1, fparent2 = nullid, newfparent
767 fparent1, fparent2 = nullid, newfparent
757 elif fparent2 != nullid:
768 elif fparent2 != nullid:
758 # is one parent an ancestor of the other?
769 # is one parent an ancestor of the other?
759 fparentancestor = flog.ancestor(fparent1, fparent2)
770 fparentancestor = flog.ancestor(fparent1, fparent2)
760 if fparentancestor == fparent1:
771 if fparentancestor == fparent1:
761 fparent1, fparent2 = fparent2, nullid
772 fparent1, fparent2 = fparent2, nullid
762 elif fparentancestor == fparent2:
773 elif fparentancestor == fparent2:
763 fparent2 = nullid
774 fparent2 = nullid
764
775
765 # is the file changed?
776 # is the file changed?
766 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
777 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
767 changelist.append(fname)
778 changelist.append(fname)
768 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
779 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
769
780
770 # are just the flags changed during merge?
781 # are just the flags changed during merge?
771 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
782 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
772 changelist.append(fname)
783 changelist.append(fname)
773
784
774 return fparent1
785 return fparent1
775
786
776 def commit(self, text="", user=None, date=None, match=None, force=False,
787 def commit(self, text="", user=None, date=None, match=None, force=False,
777 editor=False, extra={}):
788 editor=False, extra={}):
778 """Add a new revision to current repository.
789 """Add a new revision to current repository.
779
790
780 Revision information is gathered from the working directory,
791 Revision information is gathered from the working directory,
781 match can be used to filter the committed files. If editor is
792 match can be used to filter the committed files. If editor is
782 supplied, it is called to get a commit message.
793 supplied, it is called to get a commit message.
783 """
794 """
784
795
785 def fail(f, msg):
796 def fail(f, msg):
786 raise util.Abort('%s: %s' % (f, msg))
797 raise util.Abort('%s: %s' % (f, msg))
787
798
788 if not match:
799 if not match:
789 match = match_.always(self.root, '')
800 match = match_.always(self.root, '')
790
801
791 if not force:
802 if not force:
792 vdirs = []
803 vdirs = []
793 match.dir = vdirs.append
804 match.dir = vdirs.append
794 match.bad = fail
805 match.bad = fail
795
806
796 wlock = self.wlock()
807 wlock = self.wlock()
797 try:
808 try:
798 p1, p2 = self.dirstate.parents()
809 p1, p2 = self.dirstate.parents()
799
810
800 if (not force and p2 != nullid and match and
811 if (not force and p2 != nullid and match and
801 (match.files() or match.anypats())):
812 (match.files() or match.anypats())):
802 raise util.Abort(_('cannot partially commit a merge '
813 raise util.Abort(_('cannot partially commit a merge '
803 '(do not specify files or patterns)'))
814 '(do not specify files or patterns)'))
804
815
805 changes = self.status(match=match, clean=force)
816 changes = self.status(match=match, clean=force)
806 if force:
817 if force:
807 changes[0].extend(changes[6]) # mq may commit unchanged files
818 changes[0].extend(changes[6]) # mq may commit unchanged files
808
819
809 # make sure all explicit patterns are matched
820 # make sure all explicit patterns are matched
810 if not force and match.files():
821 if not force and match.files():
811 matched = set(changes[0] + changes[1] + changes[2])
822 matched = set(changes[0] + changes[1] + changes[2])
812
823
813 for f in match.files():
824 for f in match.files():
814 if f == '.' or f in matched: # matched
825 if f == '.' or f in matched: # matched
815 continue
826 continue
816 if f in changes[3]: # missing
827 if f in changes[3]: # missing
817 fail(f, _('file not found!'))
828 fail(f, _('file not found!'))
818 if f in vdirs: # visited directory
829 if f in vdirs: # visited directory
819 d = f + '/'
830 d = f + '/'
820 for mf in matched:
831 for mf in matched:
821 if mf.startswith(d):
832 if mf.startswith(d):
822 break
833 break
823 else:
834 else:
824 fail(f, _("no match under directory!"))
835 fail(f, _("no match under directory!"))
825 elif f not in self.dirstate:
836 elif f not in self.dirstate:
826 fail(f, _("file not tracked!"))
837 fail(f, _("file not tracked!"))
827
838
828 if (not force and not extra.get("close") and p2 == nullid
839 if (not force and not extra.get("close") and p2 == nullid
829 and not (changes[0] or changes[1] or changes[2])
840 and not (changes[0] or changes[1] or changes[2])
830 and self[None].branch() == self['.'].branch()):
841 and self[None].branch() == self['.'].branch()):
831 self.ui.status(_("nothing changed\n"))
842 self.ui.status(_("nothing changed\n"))
832 return None
843 return None
833
844
834 ms = merge_.mergestate(self)
845 ms = merge_.mergestate(self)
835 for f in changes[0]:
846 for f in changes[0]:
836 if f in ms and ms[f] == 'u':
847 if f in ms and ms[f] == 'u':
837 raise util.Abort(_("unresolved merge conflicts "
848 raise util.Abort(_("unresolved merge conflicts "
838 "(see hg resolve)"))
849 "(see hg resolve)"))
839
850
840 cctx = context.workingctx(self, (p1, p2), text, user, date,
851 cctx = context.workingctx(self, (p1, p2), text, user, date,
841 extra, changes)
852 extra, changes)
842 if editor:
853 if editor:
843 cctx._text = editor(self, cctx)
854 cctx._text = editor(self, cctx)
844 ret = self.commitctx(cctx, True)
855 ret = self.commitctx(cctx, True)
845
856
846 # update dirstate and mergestate
857 # update dirstate and mergestate
847 for f in changes[0] + changes[1]:
858 for f in changes[0] + changes[1]:
848 self.dirstate.normal(f)
859 self.dirstate.normal(f)
849 for f in changes[2]:
860 for f in changes[2]:
850 self.dirstate.forget(f)
861 self.dirstate.forget(f)
851 self.dirstate.setparents(ret)
862 self.dirstate.setparents(ret)
852 ms.reset()
863 ms.reset()
853
864
854 return ret
865 return ret
855
866
856 finally:
867 finally:
857 wlock.release()
868 wlock.release()
858
869
859 def commitctx(self, ctx, error=False):
870 def commitctx(self, ctx, error=False):
860 """Add a new revision to current repository.
871 """Add a new revision to current repository.
861
872
862 Revision information is passed via the context argument.
873 Revision information is passed via the context argument.
863 """
874 """
864
875
865 tr = lock = None
876 tr = lock = None
866 removed = ctx.removed()
877 removed = ctx.removed()
867 p1, p2 = ctx.p1(), ctx.p2()
878 p1, p2 = ctx.p1(), ctx.p2()
868 m1 = p1.manifest().copy()
879 m1 = p1.manifest().copy()
869 m2 = p2.manifest()
880 m2 = p2.manifest()
870 user = ctx.user()
881 user = ctx.user()
871
882
872 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
883 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
873 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
884 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
874
885
875 lock = self.lock()
886 lock = self.lock()
876 try:
887 try:
877 tr = self.transaction()
888 tr = self.transaction()
878 trp = weakref.proxy(tr)
889 trp = weakref.proxy(tr)
879
890
880 # check in files
891 # check in files
881 new = {}
892 new = {}
882 changed = []
893 changed = []
883 linkrev = len(self)
894 linkrev = len(self)
884 for f in sorted(ctx.modified() + ctx.added()):
895 for f in sorted(ctx.modified() + ctx.added()):
885 self.ui.note(f + "\n")
896 self.ui.note(f + "\n")
886 try:
897 try:
887 fctx = ctx[f]
898 fctx = ctx[f]
888 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
899 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
889 changed)
900 changed)
890 m1.set(f, fctx.flags())
901 m1.set(f, fctx.flags())
891 except (OSError, IOError):
902 except (OSError, IOError):
892 if error:
903 if error:
893 self.ui.warn(_("trouble committing %s!\n") % f)
904 self.ui.warn(_("trouble committing %s!\n") % f)
894 raise
905 raise
895 else:
906 else:
896 removed.append(f)
907 removed.append(f)
897
908
898 # update manifest
909 # update manifest
899 m1.update(new)
910 m1.update(new)
900 removed = [f for f in sorted(removed) if f in m1 or f in m2]
911 removed = [f for f in sorted(removed) if f in m1 or f in m2]
901 drop = [f for f in removed if f in m1]
912 drop = [f for f in removed if f in m1]
902 for f in drop:
913 for f in drop:
903 del m1[f]
914 del m1[f]
904 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
915 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
905 p2.manifestnode(), (new, drop))
916 p2.manifestnode(), (new, drop))
906
917
907 # update changelog
918 # update changelog
908 self.changelog.delayupdate()
919 self.changelog.delayupdate()
909 n = self.changelog.add(mn, changed + removed, ctx.description(),
920 n = self.changelog.add(mn, changed + removed, ctx.description(),
910 trp, p1.node(), p2.node(),
921 trp, p1.node(), p2.node(),
911 user, ctx.date(), ctx.extra().copy())
922 user, ctx.date(), ctx.extra().copy())
912 p = lambda: self.changelog.writepending() and self.root or ""
923 p = lambda: self.changelog.writepending() and self.root or ""
913 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
924 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
914 parent2=xp2, pending=p)
925 parent2=xp2, pending=p)
915 self.changelog.finalize(trp)
926 self.changelog.finalize(trp)
916 tr.close()
927 tr.close()
917
928
918 if self.branchcache:
929 if self.branchcache:
919 self.branchtags()
930 self.branchtags()
920
931
921 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
932 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
922 return n
933 return n
923 finally:
934 finally:
924 del tr
935 del tr
925 lock.release()
936 lock.release()
926
937
927 def walk(self, match, node=None):
938 def walk(self, match, node=None):
928 '''
939 '''
929 walk recursively through the directory tree or a given
940 walk recursively through the directory tree or a given
930 changeset, finding all files matched by the match
941 changeset, finding all files matched by the match
931 function
942 function
932 '''
943 '''
933 return self[node].walk(match)
944 return self[node].walk(match)
934
945
935 def status(self, node1='.', node2=None, match=None,
946 def status(self, node1='.', node2=None, match=None,
936 ignored=False, clean=False, unknown=False):
947 ignored=False, clean=False, unknown=False):
937 """return status of files between two nodes or node and working directory
948 """return status of files between two nodes or node and working directory
938
949
939 If node1 is None, use the first dirstate parent instead.
950 If node1 is None, use the first dirstate parent instead.
940 If node2 is None, compare node1 with working directory.
951 If node2 is None, compare node1 with working directory.
941 """
952 """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
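                    # restore from whichever dirstate parent manifest knows
                    # about f (the and/or chain below is an old-style
                    # conditional expression)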
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
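        # (decorate-sort-undecorate: negating the revision number makes a
        # plain ascending sort yield newest-first results)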
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
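        # Each result tuple describes a linear segment of history,
        # (head, root, first parent of root, second parent of root):
        # we follow first parents from each node until we hit a merge
        # changeset or a root. The discovery code walks these segments.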
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
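        # Sample the first-parent chain from each 'top' toward 'bottom',
        # recording the nodes 1, 2, 4, 8, ... steps below top (so with a
        # ten-node gap, the nodes at distances 1, 2, 4 and 8 are returned).
        # findcommonincoming uses these exponentially spaced samples, served
        # remotely via remote.between(), for its binary search.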
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children in either.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children in either.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
        self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

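            # A remote with changegroupsubset can filter by heads, so we ask
            # for exactly the remote heads found during discovery; without
            # it only a full changegroup rooted at 'fetch' is possible
            # (hence the abort below when specific heads were requested).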
            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "the other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to a remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''
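            # In short: without --force, a push may neither increase the
            # number of heads the remote has on an existing branch nor
            # create a branch the remote has never seen.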

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n")
                                 % self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

            if inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file. Let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
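            # Stream layout produced here: changelog chunks, manifest
            # chunks, then for each changed file a chunk header carrying the
            # filename followed by that file's filenode chunks; a final
            # closechunk() marks the end of the stream.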
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1890
1901
1891 return util.chunkbuffer(gengroup())
1902 return util.chunkbuffer(gengroup())
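
Note how the method ends: gengroup is a generator, so none of the work above runs until a consumer reads from the result, and util.chunkbuffer adapts the generator to the file-like read() interface the wire protocol expects. The changegroup therefore streams instead of being materialized in memory. A minimal sketch of such an adapter (illustrative only; Mercurial's real util.chunkbuffer handles buffering more carefully):

class demo_chunkbuffer(object):
    """File-like wrapper around an iterator of strings (illustrative)."""
    def __init__(self, gen):
        self.iter = iter(gen)
        self.buf = ''

    def read(self, size):
        # pull chunks from the generator until the request can be
        # satisfied or the generator is exhausted
        while len(self.buf) < size:
            try:
                self.buf += self.iter.next()
            except StopIteration:
                break
        data, self.buf = self.buf[:size], self.buf[size:]
        return data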

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
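
For reference, the framing behind chunkheader() and closechunk() as used above: each chunk is preceded by a 4-byte big-endian length that counts the 4 length bytes themselves, and a zero length terminates a group (changelog group, manifest group, then a name chunk plus a delta group per file). A reader for a single chunk under that framing might look like this (hypothetical helper name, mirroring what changegroup.getchunk does):

import struct

def demo_readchunk(fp):
    # read one framed chunk; '' signals the end of the current group
    d = fp.read(4)
    if not d:
        return ''
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        return ''          # a zero or empty length closes the group
    return fp.read(l - 4)  # payload excludes the 4 header bytes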

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers
        # will not see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
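
The head-delta convention in the docstring makes the result easy to decode on the caller's side with plain arithmetic. A hypothetical decoder:

def demo_describe(ret):
    # interpret addchangegroup()'s return value (see docstring above)
    if ret == 0:
        return 'nothing changed'
    if ret > 1:
        return '%d new heads' % (ret - 1)
    if ret < 0:
        return '%d heads removed' % (-ret - 1)
    return 'head count unchanged'    # ret == 1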


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
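
stream_in is the client half of the uncompressed-clone protocol, and the reads above imply its wire format: a numeric status line, a 'total_files total_bytes' line, then for each file a header line of the form 'name\0size' followed by exactly size raw bytes. A stripped-down parser under those assumptions (hypothetical name; error handling omitted):

def demo_parse_stream(fp):
    # yield (name, data) pairs from a stream_out()-style payload
    if int(fp.readline()) != 0:
        raise ValueError('server refused to stream')
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))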

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)
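
Note that streaming is only attempted for a full clone from a server advertising the 'stream' capability; explicit heads or a non-capable server always degrade to a pull-based clone. A hedged usage sketch (`ui` and `remote` are assumed to already exist; the target path is made up):

dest = localrepository(ui, '/tmp/clone-target', create=1)
dest.clone(remote, stream=True)   # silently falls back to pull if needed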

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
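
The point of aftertrans is that the returned thunk closes over a fresh list of tuples and nothing else, so a transaction that stores it holds no reference back to the repository, and reference cycles cannot delay destructors. Elsewhere in this file, transaction() hands it journal-to-undo rename pairs to run after a clean close. An illustrative invocation (paths made up):

post = aftertrans([('.hg/store/journal', '.hg/store/undo')])
# ... after the transaction closes successfully ...
post()   # util.rename moves each journal file into place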

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True