merge with crew
Benoit Boissinot
r3793:f3fbf76d merge default
mercurial/bundlerepo.py
@@ -1,256 +1,256 @@
1 1 """
2 2 bundlerepo.py - repository class for viewing uncompressed bundles
3 3
4 4 This provides a read-only repository interface to bundles as if
5 5 they were part of the actual repository.
6 6
7 7 Copyright 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 16 demandload(globals(), "changegroup util os struct bz2 tempfile")
17 17
18 18 import localrepo, changelog, manifest, filelog, revlog
19 19
20 20 class bundlerevlog(revlog.revlog):
21 21 def __init__(self, opener, indexfile, datafile, bundlefile,
22 22 linkmapper=None):
23 23 # How it works:
24 24 # to retrieve a revision, we need to know the offset of
25 25 # the revision in the bundlefile (an opened file).
26 26 #
27 27 # We store this offset in the index (start). To differentiate a
28 28 # rev in the bundle from a rev in the revlog, we check
29 29 # len(index[r]). If the tuple is bigger than 7, it is a bundle
30 30 # entry (bigger because we also store the node the delta applies to)
31 31 #
32 32 revlog.revlog.__init__(self, opener, indexfile, datafile)
33 33 self.bundlefile = bundlefile
34 34 self.basemap = {}
35 35 def chunkpositer():
36 36 for chunk in changegroup.chunkiter(bundlefile):
37 37 pos = bundlefile.tell()
38 38 yield chunk, pos - len(chunk)
39 39 n = self.count()
40 40 prev = None
41 41 for chunk, start in chunkpositer():
42 42 size = len(chunk)
43 43 if size < 80:
44 44 raise util.Abort("invalid changegroup")
45 45 start += 80
46 46 size -= 80
47 47 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
48 48 if node in self.nodemap:
49 49 prev = node
50 50 continue
51 51 for p in (p1, p2):
52 52 if not p in self.nodemap:
53 53 raise revlog.RevlogError(_("unknown parent %s") % short(p))
54 54 if linkmapper is None:
55 55 link = n
56 56 else:
57 57 link = linkmapper(cs)
58 58
59 59 if not prev:
60 60 prev = p1
61 61 # index entry: start, size, base (not used), link, p1, p2, delta ref
62 62 if self.version == revlog.REVLOGV0:
63 63 e = (start, size, None, link, p1, p2, node)
64 64 else:
65 65 e = (self.offset_type(start, 0), size, -1, None, link,
66 66 self.rev(p1), self.rev(p2), node)
67 67 self.basemap[n] = prev
68 68 self.index.append(e)
69 69 self.nodemap[node] = n
70 70 prev = node
71 71 n += 1
72 72
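A minimal standalone sketch (not part of this commit) of the changegroup
chunk framing that the constructor above walks: chunkiter yields
length-prefixed chunks (a 4-byte big-endian length that counts itself,
with zero marking the end of a group), and the first 80 bytes of each
chunk are the four 20-byte nodes unpacked via struct above.

    import struct

    def iterchunks(f):
        # yield raw chunks from an open uncompressed (HG10UN) stream
        while True:
            d = f.read(4)
            if len(d) < 4:
                break
            l = struct.unpack(">l", d)[0]
            if l <= 4:                # an empty chunk ends the group
                break
            yield f.read(l - 4)

    def deltahead(chunk):
        # split a chunk into its 80-byte header and the delta payload
        node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
        return node, p1, p2, cs, chunk[80:]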
73 73 def bundle(self, rev):
74 74 """is rev from the bundle"""
75 75 if rev < 0:
76 76 return False
77 77 return rev in self.basemap
78 78 def bundlebase(self, rev): return self.basemap[rev]
79 79 def chunk(self, rev, df=None, cachelen=4096):
80 80 # Warning: in case of bundle, the diff is against bundlebase,
81 81 # not against rev - 1
82 82 # XXX: could use some caching
83 83 if not self.bundle(rev):
84 84 return revlog.revlog.chunk(self, rev, df, cachelen)
85 85 self.bundlefile.seek(self.start(rev))
86 86 return self.bundlefile.read(self.length(rev))
87 87
88 88 def revdiff(self, rev1, rev2):
89 89 """return or calculate a delta between two revisions"""
90 90 if self.bundle(rev1) and self.bundle(rev2):
91 91 # hot path for bundle
92 92 revb = self.rev(self.bundlebase(rev2))
93 93 if revb == rev1:
94 94 return self.chunk(rev2)
95 95 elif not self.bundle(rev1) and not self.bundle(rev2):
96 96 return revlog.revlog.revdiff(self, rev1, rev2)
97 97
98 98 return self.diff(self.revision(self.node(rev1)),
99 99 self.revision(self.node(rev2)))
100 100
101 101 def revision(self, node):
102 102 """return an uncompressed revision of a given"""
103 103 if node == nullid: return ""
104 104
105 105 text = None
106 106 chain = []
107 107 iter_node = node
108 108 rev = self.rev(iter_node)
109 109 # reconstruct the revision if it is from a changegroup
110 110 while self.bundle(rev):
111 111 if self.cache and self.cache[0] == iter_node:
112 112 text = self.cache[2]
113 113 break
114 114 chain.append(rev)
115 115 iter_node = self.bundlebase(rev)
116 116 rev = self.rev(iter_node)
117 117 if text is None:
118 118 text = revlog.revlog.revision(self, iter_node)
119 119
120 120 while chain:
121 121 delta = self.chunk(chain.pop())
122 122 text = self.patches(text, [delta])
123 123
124 124 p1, p2 = self.parents(node)
125 125 if node != revlog.hash(text, p1, p2):
126 126 raise revlog.RevlogError(_("integrity check failed on %s:%d")
127 127 % (self.datafile, self.rev(node)))
128 128
129 129 self.cache = (node, self.rev(node), text)
130 130 return text
131 131
132 132 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
133 133 raise NotImplementedError
134 134 def addgroup(self, revs, linkmapper, transaction, unique=0):
135 135 raise NotImplementedError
136 136 def strip(self, rev, minlink):
137 137 raise NotImplementedError
138 138 def checksize(self):
139 139 raise NotImplementedError
140 140
141 141 class bundlechangelog(bundlerevlog, changelog.changelog):
142 142 def __init__(self, opener, bundlefile):
143 143 changelog.changelog.__init__(self, opener)
144 144 bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
145 145 bundlefile)
146 146
147 147 class bundlemanifest(bundlerevlog, manifest.manifest):
148 148 def __init__(self, opener, bundlefile, linkmapper):
149 149 manifest.manifest.__init__(self, opener)
150 150 bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
151 151 bundlefile, linkmapper)
152 152
153 153 class bundlefilelog(bundlerevlog, filelog.filelog):
154 154 def __init__(self, opener, path, bundlefile, linkmapper):
155 155 filelog.filelog.__init__(self, opener, path)
156 156 bundlerevlog.__init__(self, opener, self.indexfile, self.datafile,
157 157 bundlefile, linkmapper)
158 158
159 159 class bundlerepository(localrepo.localrepository):
160 160 def __init__(self, ui, path, bundlename):
161 161 localrepo.localrepository.__init__(self, ui, path)
162 162
163 163 self._url = 'bundle:' + bundlename
164 164 if path: self._url += '+' + path
165 165
166 166 self.tempfile = None
167 167 self.bundlefile = open(bundlename, "rb")
168 168 header = self.bundlefile.read(6)
169 169 if not header.startswith("HG"):
170 170 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
171 171 elif not header.startswith("HG10"):
172 172 raise util.Abort(_("%s: unknown bundle version") % bundlename)
173 173 elif header == "HG10BZ":
174 174 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
175 175 suffix=".hg10un", dir=self.path)
176 176 self.tempfile = temp
177 177 fptemp = os.fdopen(fdtemp, 'wb')
178 178 def generator(f):
179 179 zd = bz2.BZ2Decompressor()
180 180 zd.decompress("BZ")
181 181 for chunk in f:
182 182 yield zd.decompress(chunk)
183 183 gen = generator(util.filechunkiter(self.bundlefile, 4096))
184 184
185 185 try:
186 186 fptemp.write("HG10UN")
187 187 for chunk in gen:
188 188 fptemp.write(chunk)
189 189 finally:
190 190 fptemp.close()
191 191 self.bundlefile.close()
192 192
193 193 self.bundlefile = open(self.tempfile, "rb")
194 194 # seek right after the header
195 195 self.bundlefile.seek(6)
196 196 elif header == "HG10UN":
197 197 # nothing to do
198 198 pass
199 199 else:
200 200 raise util.Abort(_("%s: unknown bundle compression type")
201 201 % bundlename)
202 self.changelog = bundlechangelog(self.opener, self.bundlefile)
203 self.manifest = bundlemanifest(self.opener, self.bundlefile,
202 self.changelog = bundlechangelog(self.sopener, self.bundlefile)
203 self.manifest = bundlemanifest(self.sopener, self.bundlefile,
204 204 self.changelog.rev)
205 205 # dict with the mapping 'filename' -> position in the bundle
206 206 self.bundlefilespos = {}
207 207 while 1:
208 208 f = changegroup.getchunk(self.bundlefile)
209 209 if not f:
210 210 break
211 211 self.bundlefilespos[f] = self.bundlefile.tell()
212 212 for c in changegroup.chunkiter(self.bundlefile):
213 213 pass
214 214
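An illustrative-only sketch of the header dispatch performed in the
constructor above; the function name and ValueError are placeholders,
not Mercurial API.

    def bundlecompression(filename):
        f = open(filename, "rb")
        header = f.read(6)
        if not header.startswith("HG"):
            raise ValueError("%s: not a Mercurial bundle file" % filename)
        if not header.startswith("HG10"):
            raise ValueError("%s: unknown bundle version" % filename)
        if header == "HG10BZ":
            return "bzip2"   # decompressed into a temporary HG10UN file
        if header == "HG10UN":
            return "none"    # uncompressed, read in place
        raise ValueError("%s: unknown bundle compression type" % filename)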
215 215 def url(self):
216 216 return self._url
217 217
218 218 def dev(self):
219 219 return -1
220 220
221 221 def file(self, f):
222 222 if f[0] == '/':
223 223 f = f[1:]
224 224 if f in self.bundlefilespos:
225 225 self.bundlefile.seek(self.bundlefilespos[f])
226 return bundlefilelog(self.opener, f, self.bundlefile,
226 return bundlefilelog(self.sopener, f, self.bundlefile,
227 227 self.changelog.rev)
228 228 else:
229 return filelog.filelog(self.opener, f)
229 return filelog.filelog(self.sopener, f)
230 230
231 231 def close(self):
232 232 """Close assigned bundle file immediately."""
233 233 self.bundlefile.close()
234 234
235 235 def __del__(self):
236 236 bundlefile = getattr(self, 'bundlefile', None)
237 237 if bundlefile and not bundlefile.closed:
238 238 bundlefile.close()
239 239 tempfile = getattr(self, 'tempfile', None)
240 240 if tempfile is not None:
241 241 os.unlink(tempfile)
242 242
243 243 def instance(ui, path, create):
244 244 if create:
245 245 raise util.Abort(_('cannot create new bundle repository'))
246 246 path = util.drop_scheme('file', path)
247 247 if path.startswith('bundle:'):
248 248 path = util.drop_scheme('bundle', path)
249 249 s = path.split("+", 1)
250 250 if len(s) == 1:
251 251 repopath, bundlename = "", s[0]
252 252 else:
253 253 repopath, bundlename = s
254 254 else:
255 255 repopath, bundlename = '', path
256 256 return bundlerepository(ui, repopath, bundlename)
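How instance() splits bundle URLs, shown as a self-contained sketch
without the util.drop_scheme helper (the paths are example values):

    def splitbundleurl(path):
        if path.startswith('bundle:'):
            path = path[len('bundle:'):]
            s = path.split("+", 1)
            if len(s) == 1:
                return "", s[0]
            return s[0], s[1]
        return '', path

    # splitbundleurl("bundle:/repo+changes.hg") == ("/repo", "changes.hg")
    # splitbundleurl("changes.hg")              == ("", "changes.hg")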
mercurial/hg.py
@@ -1,256 +1,256 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from node import *
10 10 from repo import *
11 11 from demandload import *
12 12 from i18n import gettext as _
13 13 demandload(globals(), "localrepo bundlerepo httprepo sshrepo statichttprepo")
14 14 demandload(globals(), "errno lock os shutil util merge@_merge verify@_verify")
15 15
16 16 def _local(path):
17 17 return (os.path.isfile(util.drop_scheme('file', path)) and
18 18 bundlerepo or localrepo)
19 19
20 20 schemes = {
21 21 'bundle': bundlerepo,
22 22 'file': _local,
23 23 'hg': httprepo,
24 24 'http': httprepo,
25 25 'https': httprepo,
26 26 'old-http': statichttprepo,
27 27 'ssh': sshrepo,
28 28 'static-http': statichttprepo,
29 29 }
30 30
31 31 def _lookup(path):
32 32 scheme = 'file'
33 33 if path:
34 34 c = path.find(':')
35 35 if c > 0:
36 36 scheme = path[:c]
37 37 thing = schemes.get(scheme) or schemes['file']
38 38 try:
39 39 return thing(path)
40 40 except TypeError:
41 41 return thing
42 42
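The TypeError handling above is what lets the 'file' entry be a chooser
function while every other scheme maps straight to a module; a sketch of
the scheme sniffing (the URLs in the comments are examples):

    def pickscheme(path, schemes, default='file'):
        # mirror of the dispatch in _lookup() above (sketch)
        scheme = default
        c = path.find(':')
        if c > 0:
            scheme = path[:c]
        return schemes.get(scheme) or schemes[default]

    # pickscheme('http://host/repo', schemes)  -> httprepo
    # pickscheme('bundle:changes.hg', schemes) -> bundlerepo
    # pickscheme('/path/to/repo', schemes)     -> _local, which is then
    #                                             called to choose between
    #                                             localrepo and bundlerepo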
43 43 def islocal(repo):
44 44 '''return true if repo or path is local'''
45 45 if isinstance(repo, str):
46 46 try:
47 47 return _lookup(repo).islocal(repo)
48 48 except AttributeError:
49 49 return False
50 50 return repo.local()
51 51
52 52 repo_setup_hooks = []
53 53
54 54 def repository(ui, path='', create=False):
55 55 """return a repository object for the specified path"""
56 56 repo = _lookup(path).instance(ui, path, create)
57 57 for hook in repo_setup_hooks:
58 58 hook(ui, repo)
59 59 return repo
60 60
61 61 def defaultdest(source):
62 62 '''return default destination of clone if none is given'''
63 63 return os.path.basename(os.path.normpath(source))
64 64
65 65 def clone(ui, source, dest=None, pull=False, rev=None, update=True,
66 66 stream=False):
67 67 """Make a copy of an existing repository.
68 68
69 69 Create a copy of an existing repository in a new directory. The
70 70 source and destination are URLs, as passed to the repository
71 71 function. Returns a pair of repository objects, the source and
72 72 newly created destination.
73 73
74 74 The location of the source is added to the new repository's
75 75 .hg/hgrc file, as the default to be used for future pulls and
76 76 pushes.
77 77
78 78 If an exception is raised, the partly cloned/updated destination
79 79 repository will be deleted.
80 80
81 81 Arguments:
82 82
83 83 source: repository object or URL
84 84
85 85 dest: URL of destination repository to create (defaults to base
86 86 name of source repository)
87 87
88 88 pull: always pull from source repository, even in local case
89 89
90 90 stream: stream raw data uncompressed from repository (fast over
91 91 LAN, slow over WAN)
92 92
93 93 rev: revision to clone up to (implies pull=True)
94 94
95 95 update: update working directory after clone completes, if
96 96 destination is local repository
97 97 """
98 98 if isinstance(source, str):
99 99 src_repo = repository(ui, source)
100 100 else:
101 101 src_repo = source
102 102 source = src_repo.url()
103 103
104 104 if dest is None:
105 105 dest = defaultdest(source)
106 106
107 107 def localpath(path):
108 108 if path.startswith('file://'):
109 109 return path[7:]
110 110 if path.startswith('file:'):
111 111 return path[5:]
112 112 return path
113 113
114 114 dest = localpath(dest)
115 115 source = localpath(source)
116 116
117 117 if os.path.exists(dest):
118 118 raise util.Abort(_("destination '%s' already exists") % dest)
119 119
120 120 class DirCleanup(object):
121 121 def __init__(self, dir_):
122 122 self.rmtree = shutil.rmtree
123 123 self.dir_ = dir_
124 124 def close(self):
125 125 self.dir_ = None
126 126 def __del__(self):
127 127 if self.dir_:
128 128 self.rmtree(self.dir_, True)
129 129
130 130 dest_repo = repository(ui, dest, create=True)
131 131
132 dest_path = None
133 132 dir_cleanup = None
134 133 if dest_repo.local():
135 dest_path = os.path.realpath(dest_repo.root)
136 dir_cleanup = DirCleanup(dest_path)
134 dir_cleanup = DirCleanup(os.path.realpath(dest_repo.root))
137 135
138 136 abspath = source
139 137 copy = False
140 138 if src_repo.local() and dest_repo.local():
141 139 abspath = os.path.abspath(source)
142 140 copy = not pull and not rev
143 141
144 142 src_lock, dest_lock = None, None
145 143 if copy:
146 144 try:
147 145 # we use a lock here because if we race with commit, we
148 146 # can end up with extra data in the cloned revlogs that's
149 147 # not pointed to by changesets, thus causing verify to
150 148 # fail
151 149 src_lock = src_repo.lock()
152 150 except lock.LockException:
153 151 copy = False
154 152
155 153 if copy:
156 154 # we lock here to avoid premature writing to the target
157 dest_lock = lock.lock(os.path.join(dest_path, ".hg", "lock"))
155 src_store = os.path.realpath(src_repo.spath)
156 dest_store = os.path.realpath(dest_repo.spath)
157 dest_lock = lock.lock(os.path.join(dest_store, "lock"))
158 158
159 159 files = ("data",
160 160 "00manifest.d", "00manifest.i",
161 161 "00changelog.d", "00changelog.i")
162 162 for f in files:
163 src = os.path.join(source, ".hg", f)
164 dst = os.path.join(dest_path, ".hg", f)
163 src = os.path.join(src_store, f)
164 dst = os.path.join(dest_store, f)
165 165 try:
166 166 util.copyfiles(src, dst)
167 167 except OSError, inst:
168 168 if inst.errno != errno.ENOENT:
169 169 raise
170 170
171 171 # we need to re-init the repo after manually copying the data
172 172 # into it
173 173 dest_repo = repository(ui, dest)
174 174
175 175 else:
176 176 revs = None
177 177 if rev:
178 178 if 'lookup' not in src_repo.capabilities:
179 179 raise util.Abort(_("src repository does not support revision "
180 180 "lookup and so doesn't support clone by "
181 181 "revision"))
182 182 revs = [src_repo.lookup(r) for r in rev]
183 183
184 184 if dest_repo.local():
185 185 dest_repo.clone(src_repo, heads=revs, stream=stream)
186 186 elif src_repo.local():
187 187 src_repo.push(dest_repo, revs=revs)
188 188 else:
189 189 raise util.Abort(_("clone from remote to remote not supported"))
190 190
191 191 if src_lock:
192 192 src_lock.release()
193 193
194 194 if dest_repo.local():
195 195 fp = dest_repo.opener("hgrc", "w", text=True)
196 196 fp.write("[paths]\n")
197 197 fp.write("default = %s\n" % abspath)
198 198 fp.close()
199 199
200 200 if dest_lock:
201 201 dest_lock.release()
202 202
203 203 if update:
204 204 _update(dest_repo, dest_repo.changelog.tip())
205 205 if dir_cleanup:
206 206 dir_cleanup.close()
207 207
208 208 return src_repo, dest_repo
209 209
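A typical call to clone() as defined above (a sketch: the URL and
destination are examples, and the import path assumes the Mercurial
module layout of this era):

    from mercurial import ui as uimod, hg

    u = uimod.ui()
    src_repo, dest_repo = hg.clone(u, 'http://selenic.com/hg', 'hg-copy',
                                   pull=True, update=False)
    u.status("%s\n" % dest_repo.root)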
210 210 def _showstats(repo, stats):
211 211 stats = ((stats[0], _("updated")),
212 212 (stats[1], _("merged")),
213 213 (stats[2], _("removed")),
214 214 (stats[3], _("unresolved")))
215 215 note = ", ".join([_("%d files %s") % s for s in stats])
216 216 repo.ui.status("%s\n" % note)
217 217
218 218 def _update(repo, node): return update(repo, node)
219 219
220 220 def update(repo, node):
221 221 """update the working directory to node, merging linear changes"""
222 222 stats = _merge.update(repo, node, False, False, None, None)
223 223 _showstats(repo, stats)
224 224 if stats[3]:
225 225 repo.ui.status(_("There are unresolved merges with"
226 226 " locally modified files.\n"))
227 227 return stats[3]
228 228
229 229 def clean(repo, node, wlock=None, show_stats=True):
230 230 """forcibly switch the working directory to node, clobbering changes"""
231 231 stats = _merge.update(repo, node, False, True, None, wlock)
232 232 if show_stats: _showstats(repo, stats)
233 233 return stats[3]
234 234
235 235 def merge(repo, node, force=None, remind=True, wlock=None):
236 236 """branch merge with node, resolving changes"""
237 237 stats = _merge.update(repo, node, True, force, False, wlock)
238 238 _showstats(repo, stats)
239 239 if stats[3]:
240 240 pl = repo.parents()
241 241 repo.ui.status(_("There are unresolved merges,"
242 242 " you can redo the full merge using:\n"
243 243 " hg update -C %s\n"
244 244 " hg merge %s\n")
245 245 % (pl[0].rev(), pl[1].rev()))
246 246 elif remind:
247 247 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
248 248 return stats[3]
249 249
250 250 def revert(repo, node, choose, wlock):
251 251 """revert changes to revision in node without updating dirstate"""
252 252 return _merge.update(repo, node, False, True, choose, wlock)[3]
253 253
254 254 def verify(repo):
255 255 """verify the consistency of a repository"""
256 256 return _verify.verify(repo)
mercurial/localrepo.py
@@ -1,1912 +1,1916 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 self.spath = self.path
34 35
35 36 if not os.path.isdir(self.path):
36 37 if create:
37 38 if not os.path.exists(path):
38 39 os.mkdir(path)
39 40 os.mkdir(self.path)
41 if self.spath != self.path:
42 os.mkdir(self.spath)
40 43 else:
41 44 raise repo.RepoError(_("repository %s not found") % path)
42 45 elif create:
43 46 raise repo.RepoError(_("repository %s already exists") % path)
44 47
45 48 self.root = os.path.realpath(path)
46 49 self.origroot = path
47 50 self.ui = ui.ui(parentui=parentui)
48 51 self.opener = util.opener(self.path)
49 self.sopener = util.opener(self.path)
52 self.sopener = util.opener(self.spath)
50 53 self.wopener = util.opener(self.root)
51 54
52 55 try:
53 56 self.ui.readconfig(self.join("hgrc"), self.root)
54 57 except IOError:
55 58 pass
56 59
57 60 v = self.ui.configrevlog()
58 61 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 62 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 63 fl = v.get('flags', None)
61 64 flags = 0
62 65 if fl != None:
63 66 for x in fl.split():
64 67 flags |= revlog.flagstr(x)
65 68 elif self.revlogv1:
66 69 flags = revlog.REVLOG_DEFAULT_FLAGS
67 70
68 71 v = self.revlogversion | flags
69 72 self.manifest = manifest.manifest(self.sopener, v)
70 73 self.changelog = changelog.changelog(self.sopener, v)
71 74
72 75 # the changelog might not have the inline index flag
73 76 # on. If the format of the changelog is the same as found in
74 77 # .hgrc, apply any flags found in the .hgrc as well.
75 78 # Otherwise, just version from the changelog
76 79 v = self.changelog.version
77 80 if v == self.revlogversion:
78 81 v |= flags
79 82 self.revlogversion = v
80 83
81 84 self.tagscache = None
82 85 self.branchcache = None
83 86 self.nodetagscache = None
84 87 self.encodepats = None
85 88 self.decodepats = None
86 89 self.transhandle = None
87 90
88 91 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 92
90 93 def url(self):
91 94 return 'file:' + self.root
92 95
93 96 def hook(self, name, throw=False, **args):
94 97 def callhook(hname, funcname):
95 98 '''call python hook. hook is callable object, looked up as
96 99 name in python module. if callable returns "true", hook
97 100 fails, else passes. if hook raises exception, treated as
98 101 hook failure. exception propagates if throw is "true".
99 102
100 103 reason for "true" meaning "hook failed" is so that
101 104 unmodified commands (e.g. mercurial.commands.update) can
102 105 be run as hooks without wrappers to convert return values.'''
103 106
104 107 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 108 d = funcname.rfind('.')
106 109 if d == -1:
107 110 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 111 % (hname, funcname))
109 112 modname = funcname[:d]
110 113 try:
111 114 obj = __import__(modname)
112 115 except ImportError:
113 116 try:
114 117 # extensions are loaded with hgext_ prefix
115 118 obj = __import__("hgext_%s" % modname)
116 119 except ImportError:
117 120 raise util.Abort(_('%s hook is invalid '
118 121 '(import of "%s" failed)') %
119 122 (hname, modname))
120 123 try:
121 124 for p in funcname.split('.')[1:]:
122 125 obj = getattr(obj, p)
123 126 except AttributeError, err:
124 127 raise util.Abort(_('%s hook is invalid '
125 128 '("%s" is not defined)') %
126 129 (hname, funcname))
127 130 if not callable(obj):
128 131 raise util.Abort(_('%s hook is invalid '
129 132 '("%s" is not callable)') %
130 133 (hname, funcname))
131 134 try:
132 135 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 136 except (KeyboardInterrupt, util.SignalInterrupt):
134 137 raise
135 138 except Exception, exc:
136 139 if isinstance(exc, util.Abort):
137 140 self.ui.warn(_('error: %s hook failed: %s\n') %
138 141 (hname, exc.args[0]))
139 142 else:
140 143 self.ui.warn(_('error: %s hook raised an exception: '
141 144 '%s\n') % (hname, exc))
142 145 if throw:
143 146 raise
144 147 self.ui.print_exc()
145 148 return True
146 149 if r:
147 150 if throw:
148 151 raise util.Abort(_('%s hook failed') % hname)
149 152 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 153 return r
151 154
152 155 def runhook(name, cmd):
153 156 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 157 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 158 r = util.system(cmd, environ=env, cwd=self.root)
156 159 if r:
157 160 desc, r = util.explain_exit(r)
158 161 if throw:
159 162 raise util.Abort(_('%s hook %s') % (name, desc))
160 163 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 164 return r
162 165
163 166 r = False
164 167 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 168 if hname.split(".", 1)[0] == name and cmd]
166 169 hooks.sort()
167 170 for hname, cmd in hooks:
168 171 if cmd.startswith('python:'):
169 172 r = callhook(hname, cmd[7:].strip()) or r
170 173 else:
171 174 r = runhook(hname, cmd) or r
172 175 return r
173 176
174 177 tag_disallowed = ':\r\n'
175 178
176 179 def tag(self, name, node, message, local, user, date):
177 180 '''tag a revision with a symbolic name.
178 181
179 182 if local is True, the tag is stored in a per-repository file.
180 183 otherwise, it is stored in the .hgtags file, and a new
181 184 changeset is committed with the change.
182 185
183 186 keyword arguments:
184 187
185 188 local: whether to store tag in non-version-controlled file
186 189 (default False)
187 190
188 191 message: commit message to use if committing
189 192
190 193 user: name of user to use if committing
191 194
192 195 date: date tuple to use if committing'''
193 196
194 197 for c in self.tag_disallowed:
195 198 if c in name:
196 199 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 200
198 201 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 202
200 203 if local:
201 204 # local tags are stored in the current charset
202 205 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
203 206 self.hook('tag', node=hex(node), tag=name, local=local)
204 207 return
205 208
206 209 for x in self.status()[:5]:
207 210 if '.hgtags' in x:
208 211 raise util.Abort(_('working copy of .hgtags is changed '
209 212 '(please commit .hgtags manually)'))
210 213
211 214 # committed tags are stored in UTF-8
212 215 line = '%s %s\n' % (hex(node), util.fromlocal(name))
213 216 self.wfile('.hgtags', 'ab').write(line)
214 217 if self.dirstate.state('.hgtags') == '?':
215 218 self.add(['.hgtags'])
216 219
217 220 self.commit(['.hgtags'], message, user, date)
218 221 self.hook('tag', node=hex(node), tag=name, local=local)
219 222
220 223 def tags(self):
221 224 '''return a mapping of tag to node'''
222 225 if not self.tagscache:
223 226 self.tagscache = {}
224 227
225 228 def parsetag(line, context):
226 229 if not line:
227 230 return
228 231 s = line.split(" ", 1)
229 232 if len(s) != 2:
230 233 self.ui.warn(_("%s: cannot parse entry\n") % context)
231 234 return
232 235 node, key = s
233 236 key = util.tolocal(key.strip()) # stored in UTF-8
234 237 try:
235 238 bin_n = bin(node)
236 239 except TypeError:
237 240 self.ui.warn(_("%s: node '%s' is not well formed\n") %
238 241 (context, node))
239 242 return
240 243 if bin_n not in self.changelog.nodemap:
241 244 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
242 245 (context, key))
243 246 return
244 247 self.tagscache[key] = bin_n
245 248
246 249 # read the tags file from each head, ending with the tip,
247 250 # and add each tag found to the map, with "newer" ones
248 251 # taking precedence
249 252 f = None
250 253 for rev, node, fnode in self._hgtagsnodes():
251 254 f = (f and f.filectx(fnode) or
252 255 self.filectx('.hgtags', fileid=fnode))
253 256 count = 0
254 257 for l in f.data().splitlines():
255 258 count += 1
256 259 parsetag(l, _("%s, line %d") % (str(f), count))
257 260
258 261 try:
259 262 f = self.opener("localtags")
260 263 count = 0
261 264 for l in f:
262 265 # localtags are stored in the local character set
263 266 # while the internal tag table is stored in UTF-8
264 267 l = util.fromlocal(l)
265 268 count += 1
266 269 parsetag(l, _("localtags, line %d") % count)
267 270 except IOError:
268 271 pass
269 272
270 273 self.tagscache['tip'] = self.changelog.tip()
271 274
272 275 return self.tagscache
273 276
274 277 def _hgtagsnodes(self):
275 278 heads = self.heads()
276 279 heads.reverse()
277 280 last = {}
278 281 ret = []
279 282 for node in heads:
280 283 c = self.changectx(node)
281 284 rev = c.rev()
282 285 try:
283 286 fnode = c.filenode('.hgtags')
284 287 except repo.LookupError:
285 288 continue
286 289 ret.append((rev, node, fnode))
287 290 if fnode in last:
288 291 ret[last[fnode]] = None
289 292 last[fnode] = len(ret) - 1
290 293 return [item for item in ret if item]
291 294
292 295 def tagslist(self):
293 296 '''return a list of tags ordered by revision'''
294 297 l = []
295 298 for t, n in self.tags().items():
296 299 try:
297 300 r = self.changelog.rev(n)
298 301 except:
299 302 r = -2 # sort to the beginning of the list if unknown
300 303 l.append((r, t, n))
301 304 l.sort()
302 305 return [(t, n) for r, t, n in l]
303 306
304 307 def nodetags(self, node):
305 308 '''return the tags associated with a node'''
306 309 if not self.nodetagscache:
307 310 self.nodetagscache = {}
308 311 for t, n in self.tags().items():
309 312 self.nodetagscache.setdefault(n, []).append(t)
310 313 return self.nodetagscache.get(node, [])
311 314
312 315 def branchtags(self):
313 316 if self.branchcache != None:
314 317 return self.branchcache
315 318
316 319 self.branchcache = {} # avoid recursion in changectx
317 320
318 321 partial, last, lrev = self._readbranchcache()
319 322
320 323 tiprev = self.changelog.count() - 1
321 324 if lrev != tiprev:
322 325 self._updatebranchcache(partial, lrev+1, tiprev+1)
323 326 self._writebranchcache(partial, self.changelog.tip(), tiprev)
324 327
325 328 # the branch cache is stored on disk as UTF-8, but in the local
326 329 # charset internally
327 330 for k, v in partial.items():
328 331 self.branchcache[util.tolocal(k)] = v
329 332 return self.branchcache
330 333
331 334 def _readbranchcache(self):
332 335 partial = {}
333 336 try:
334 337 f = self.opener("branches.cache")
335 338 lines = f.read().split('\n')
336 339 f.close()
337 340 last, lrev = lines.pop(0).rstrip().split(" ", 1)
338 341 last, lrev = bin(last), int(lrev)
339 342 if not (lrev < self.changelog.count() and
340 343 self.changelog.node(lrev) == last): # sanity check
341 344 # invalidate the cache
342 345 raise ValueError('Invalid branch cache: unknown tip')
343 346 for l in lines:
344 347 if not l: continue
345 348 node, label = l.rstrip().split(" ", 1)
346 349 partial[label] = bin(node)
347 350 except (KeyboardInterrupt, util.SignalInterrupt):
348 351 raise
349 352 except Exception, inst:
350 353 if self.ui.debugflag:
351 354 self.ui.warn(str(inst), '\n')
352 355 partial, last, lrev = {}, nullid, nullrev
353 356 return partial, last, lrev
354 357
355 358 def _writebranchcache(self, branches, tip, tiprev):
356 359 try:
357 360 f = self.opener("branches.cache", "w")
358 361 f.write("%s %s\n" % (hex(tip), tiprev))
359 362 for label, node in branches.iteritems():
360 363 f.write("%s %s\n" % (hex(node), label))
361 364 except IOError:
362 365 pass
363 366
364 367 def _updatebranchcache(self, partial, start, end):
365 368 for r in xrange(start, end):
366 369 c = self.changectx(r)
367 370 b = c.branch()
368 371 if b:
369 372 partial[b] = c.node()
370 373
371 374 def lookup(self, key):
372 375 if key == '.':
373 376 key = self.dirstate.parents()[0]
374 377 if key == nullid:
375 378 raise repo.RepoError(_("no revision checked out"))
376 379 n = self.changelog._match(key)
377 380 if n:
378 381 return n
379 382 if key in self.tags():
380 383 return self.tags()[key]
381 384 if key in self.branchtags():
382 385 return self.branchtags()[key]
383 386 n = self.changelog._partialmatch(key)
384 387 if n:
385 388 return n
386 389 raise repo.RepoError(_("unknown revision '%s'") % key)
387 390
388 391 def dev(self):
389 392 return os.lstat(self.path).st_dev
390 393
391 394 def local(self):
392 395 return True
393 396
394 397 def join(self, f):
395 398 return os.path.join(self.path, f)
396 399
397 400 def sjoin(self, f):
398 return os.path.join(self.path, f)
401 return os.path.join(self.spath, f)
399 402
400 403 def wjoin(self, f):
401 404 return os.path.join(self.root, f)
402 405
403 406 def file(self, f):
404 407 if f[0] == '/':
405 408 f = f[1:]
406 409 return filelog.filelog(self.sopener, f, self.revlogversion)
407 410
408 411 def changectx(self, changeid=None):
409 412 return context.changectx(self, changeid)
410 413
411 414 def workingctx(self):
412 415 return context.workingctx(self)
413 416
414 417 def parents(self, changeid=None):
415 418 '''
416 419 get list of changectxs for parents of changeid or working directory
417 420 '''
418 421 if changeid is None:
419 422 pl = self.dirstate.parents()
420 423 else:
421 424 n = self.changelog.lookup(changeid)
422 425 pl = self.changelog.parents(n)
423 426 if pl[1] == nullid:
424 427 return [self.changectx(pl[0])]
425 428 return [self.changectx(pl[0]), self.changectx(pl[1])]
426 429
427 430 def filectx(self, path, changeid=None, fileid=None):
428 431 """changeid can be a changeset revision, node, or tag.
429 432 fileid can be a file revision or node."""
430 433 return context.filectx(self, path, changeid, fileid)
431 434
432 435 def getcwd(self):
433 436 return self.dirstate.getcwd()
434 437
435 438 def wfile(self, f, mode='r'):
436 439 return self.wopener(f, mode)
437 440
438 441 def wread(self, filename):
439 442 if self.encodepats == None:
440 443 l = []
441 444 for pat, cmd in self.ui.configitems("encode"):
442 445 mf = util.matcher(self.root, "", [pat], [], [])[1]
443 446 l.append((mf, cmd))
444 447 self.encodepats = l
445 448
446 449 data = self.wopener(filename, 'r').read()
447 450
448 451 for mf, cmd in self.encodepats:
449 452 if mf(filename):
450 453 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
451 454 data = util.filter(data, cmd)
452 455 break
453 456
454 457 return data
455 458
456 459 def wwrite(self, filename, data, fd=None):
457 460 if self.decodepats == None:
458 461 l = []
459 462 for pat, cmd in self.ui.configitems("decode"):
460 463 mf = util.matcher(self.root, "", [pat], [], [])[1]
461 464 l.append((mf, cmd))
462 465 self.decodepats = l
463 466
464 467 for mf, cmd in self.decodepats:
465 468 if mf(filename):
466 469 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
467 470 data = util.filter(data, cmd)
468 471 break
469 472
470 473 if fd:
471 474 return fd.write(data)
472 475 return self.wopener(filename, 'w').write(data)
473 476
474 477 def transaction(self):
475 478 tr = self.transhandle
476 479 if tr != None and tr.running():
477 480 return tr.nest()
478 481
479 482 # save dirstate for rollback
480 483 try:
481 484 ds = self.opener("dirstate").read()
482 485 except IOError:
483 486 ds = ""
484 487 self.opener("journal.dirstate", "w").write(ds)
485 488
489 renames = [(self.sjoin("journal"), self.sjoin("undo")),
490 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
486 491 tr = transaction.transaction(self.ui.warn, self.sopener,
487 492 self.sjoin("journal"),
488 aftertrans(self.path))
493 aftertrans(renames))
489 494 self.transhandle = tr
490 495 return tr
491 496
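aftertrans() itself is defined elsewhere in localrepo.py and is not shown
in this hunk; given the new call site above it presumably just replays the
(src, dest) pairs once the transaction completes. A minimal sketch of that
contract, assuming util.rename as used elsewhere in this file:

    def aftertrans(renames):
        def a():
            for src, dest in renames:
                util.rename(src, dest)   # journal -> undo, etc.
        return a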
492 497 def recover(self):
493 498 l = self.lock()
494 499 if os.path.exists(self.sjoin("journal")):
495 500 self.ui.status(_("rolling back interrupted transaction\n"))
496 501 transaction.rollback(self.sopener, self.sjoin("journal"))
497 502 self.reload()
498 503 return True
499 504 else:
500 505 self.ui.warn(_("no interrupted transaction available\n"))
501 506 return False
502 507
503 508 def rollback(self, wlock=None):
504 509 if not wlock:
505 510 wlock = self.wlock()
506 511 l = self.lock()
507 512 if os.path.exists(self.sjoin("undo")):
508 513 self.ui.status(_("rolling back last transaction\n"))
509 514 transaction.rollback(self.sopener, self.sjoin("undo"))
510 515 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
511 516 self.reload()
512 517 self.wreload()
513 518 else:
514 519 self.ui.warn(_("no rollback information available\n"))
515 520
516 521 def wreload(self):
517 522 self.dirstate.read()
518 523
519 524 def reload(self):
520 525 self.changelog.load()
521 526 self.manifest.load()
522 527 self.tagscache = None
523 528 self.nodetagscache = None
524 529
525 530 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
526 531 desc=None):
527 532 try:
528 533 l = lock.lock(lockname, 0, releasefn, desc=desc)
529 534 except lock.LockHeld, inst:
530 535 if not wait:
531 536 raise
532 537 self.ui.warn(_("waiting for lock on %s held by %r\n") %
533 538 (desc, inst.locker))
534 539 # default to 600 seconds timeout
535 540 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
536 541 releasefn, desc=desc)
537 542 if acquirefn:
538 543 acquirefn()
539 544 return l
540 545
541 546 def lock(self, wait=1):
542 547 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
543 548 desc=_('repository %s') % self.origroot)
544 549
545 550 def wlock(self, wait=1):
546 551 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
547 552 self.wreload,
548 553 desc=_('working directory of %s') % self.origroot)
549 554
550 555 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
551 556 """
552 557 commit an individual file as part of a larger transaction
553 558 """
554 559
555 560 t = self.wread(fn)
556 561 fl = self.file(fn)
557 562 fp1 = manifest1.get(fn, nullid)
558 563 fp2 = manifest2.get(fn, nullid)
559 564
560 565 meta = {}
561 566 cp = self.dirstate.copied(fn)
562 567 if cp:
563 568 meta["copy"] = cp
564 569 if not manifest2: # not a branch merge
565 570 meta["copyrev"] = hex(manifest1.get(cp, nullid))
566 571 fp2 = nullid
567 572 elif fp2 != nullid: # copied on remote side
568 573 meta["copyrev"] = hex(manifest1.get(cp, nullid))
569 574 elif fp1 != nullid: # copied on local side, reversed
570 575 meta["copyrev"] = hex(manifest2.get(cp))
571 576 fp2 = nullid
572 577 else: # directory rename
573 578 meta["copyrev"] = hex(manifest1.get(cp, nullid))
574 579 self.ui.debug(_(" %s: copy %s:%s\n") %
575 580 (fn, cp, meta["copyrev"]))
576 581 fp1 = nullid
577 582 elif fp2 != nullid:
578 583 # is one parent an ancestor of the other?
579 584 fpa = fl.ancestor(fp1, fp2)
580 585 if fpa == fp1:
581 586 fp1, fp2 = fp2, nullid
582 587 elif fpa == fp2:
583 588 fp2 = nullid
584 589
585 590 # is the file unmodified from the parent? report existing entry
586 591 if fp2 == nullid and not fl.cmp(fp1, t):
587 592 return fp1
588 593
589 594 changelist.append(fn)
590 595 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
591 596
592 597 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
593 598 if p1 is None:
594 599 p1, p2 = self.dirstate.parents()
595 600 return self.commit(files=files, text=text, user=user, date=date,
596 601 p1=p1, p2=p2, wlock=wlock)
597 602
598 603 def commit(self, files=None, text="", user=None, date=None,
599 604 match=util.always, force=False, lock=None, wlock=None,
600 605 force_editor=False, p1=None, p2=None, extra={}):
601 606
602 607 commit = []
603 608 remove = []
604 609 changed = []
605 610 use_dirstate = (p1 is None) # not rawcommit
606 611 extra = extra.copy()
607 612
608 613 if use_dirstate:
609 614 if files:
610 615 for f in files:
611 616 s = self.dirstate.state(f)
612 617 if s in 'nmai':
613 618 commit.append(f)
614 619 elif s == 'r':
615 620 remove.append(f)
616 621 else:
617 622 self.ui.warn(_("%s not tracked!\n") % f)
618 623 else:
619 624 changes = self.status(match=match)[:5]
620 625 modified, added, removed, deleted, unknown = changes
621 626 commit = modified + added
622 627 remove = removed
623 628 else:
624 629 commit = files
625 630
626 631 if use_dirstate:
627 632 p1, p2 = self.dirstate.parents()
628 633 update_dirstate = True
629 634 else:
630 635 p1, p2 = p1, p2 or nullid
631 636 update_dirstate = (self.dirstate.parents()[0] == p1)
632 637
633 638 c1 = self.changelog.read(p1)
634 639 c2 = self.changelog.read(p2)
635 640 m1 = self.manifest.read(c1[0]).copy()
636 641 m2 = self.manifest.read(c2[0])
637 642
638 643 if use_dirstate:
639 644 branchname = util.fromlocal(self.workingctx().branch())
640 645 else:
641 646 branchname = ""
642 647
643 648 if use_dirstate:
644 649 oldname = c1[5].get("branch", "") # stored in UTF-8
645 650 if not commit and not remove and not force and p2 == nullid and \
646 651 branchname == oldname:
647 652 self.ui.status(_("nothing changed\n"))
648 653 return None
649 654
650 655 xp1 = hex(p1)
651 656 if p2 == nullid: xp2 = ''
652 657 else: xp2 = hex(p2)
653 658
654 659 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
655 660
656 661 if not wlock:
657 662 wlock = self.wlock()
658 663 if not lock:
659 664 lock = self.lock()
660 665 tr = self.transaction()
661 666
662 667 # check in files
663 668 new = {}
664 669 linkrev = self.changelog.count()
665 670 commit.sort()
666 671 for f in commit:
667 672 self.ui.note(f + "\n")
668 673 try:
669 674 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
670 675 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
671 676 except IOError:
672 677 if use_dirstate:
673 678 self.ui.warn(_("trouble committing %s!\n") % f)
674 679 raise
675 680 else:
676 681 remove.append(f)
677 682
678 683 # update manifest
679 684 m1.update(new)
680 685 remove.sort()
681 686
682 687 for f in remove:
683 688 if f in m1:
684 689 del m1[f]
685 690 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
686 691
687 692 # add changeset
688 693 new = new.keys()
689 694 new.sort()
690 695
691 696 user = user or self.ui.username()
692 697 if not text or force_editor:
693 698 edittext = []
694 699 if text:
695 700 edittext.append(text)
696 701 edittext.append("")
697 702 edittext.append("HG: user: %s" % user)
698 703 if p2 != nullid:
699 704 edittext.append("HG: branch merge")
700 705 edittext.extend(["HG: changed %s" % f for f in changed])
701 706 edittext.extend(["HG: removed %s" % f for f in remove])
702 707 if not changed and not remove:
703 708 edittext.append("HG: no files changed")
704 709 edittext.append("")
705 710 # run editor in the repository root
706 711 olddir = os.getcwd()
707 712 os.chdir(self.root)
708 713 text = self.ui.edit("\n".join(edittext), user)
709 714 os.chdir(olddir)
710 715
711 716 lines = [line.rstrip() for line in text.rstrip().splitlines()]
712 717 while lines and not lines[0]:
713 718 del lines[0]
714 719 if not lines:
715 720 return None
716 721 text = '\n'.join(lines)
717 722 if branchname:
718 723 extra["branch"] = branchname
719 724 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
720 725 user, date, extra)
721 726 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
722 727 parent2=xp2)
723 728 tr.close()
724 729
725 730 if use_dirstate or update_dirstate:
726 731 self.dirstate.setparents(n)
727 732 if use_dirstate:
728 733 self.dirstate.update(new, "n")
729 734 self.dirstate.forget(remove)
730 735
731 736 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
732 737 return n
733 738
734 739 def walk(self, node=None, files=[], match=util.always, badmatch=None):
735 740 '''
736 741 walk recursively through the directory tree or a given
737 742 changeset, finding all files matched by the match
738 743 function
739 744
740 745 results are yielded in a tuple (src, filename), where src
741 746 is one of:
742 747 'f' the file was found in the directory tree
743 748 'm' the file was only in the dirstate and not in the tree
744 749 'b' file was not found and matched badmatch
745 750 '''
746 751
747 752 if node:
748 753 fdict = dict.fromkeys(files)
749 754 for fn in self.manifest.read(self.changelog.read(node)[0]):
750 755 for ffn in fdict:
751 756 # match if the file is the exact name or a directory
752 757 if ffn == fn or fn.startswith("%s/" % ffn):
753 758 del fdict[ffn]
754 759 break
755 760 if match(fn):
756 761 yield 'm', fn
757 762 for fn in fdict:
758 763 if badmatch and badmatch(fn):
759 764 if match(fn):
760 765 yield 'b', fn
761 766 else:
762 767 self.ui.warn(_('%s: No such file in rev %s\n') % (
763 768 util.pathto(self.getcwd(), fn), short(node)))
764 769 else:
765 770 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
766 771 yield src, fn
767 772
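Consuming walk() as documented above (a sketch; process() and
warnmissing() are hypothetical callbacks, not Mercurial API):

    for src, fn in repo.walk(match=util.always):
        if src == 'f':        # found in the working directory tree
            process(fn)
        elif src == 'm':      # only in the dirstate, not on disk
            warnmissing(fn)
        elif src == 'b':      # matched badmatch
            pass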
768 773 def status(self, node1=None, node2=None, files=[], match=util.always,
769 774 wlock=None, list_ignored=False, list_clean=False):
770 775 """return status of files between two nodes or node and working directory
771 776
772 777 If node1 is None, use the first dirstate parent instead.
773 778 If node2 is None, compare node1 with working directory.
774 779 """
775 780
776 781 def fcmp(fn, mf):
777 782 t1 = self.wread(fn)
778 783 return self.file(fn).cmp(mf.get(fn, nullid), t1)
779 784
780 785 def mfmatches(node):
781 786 change = self.changelog.read(node)
782 787 mf = self.manifest.read(change[0]).copy()
783 788 for fn in mf.keys():
784 789 if not match(fn):
785 790 del mf[fn]
786 791 return mf
787 792
788 793 modified, added, removed, deleted, unknown = [], [], [], [], []
789 794 ignored, clean = [], []
790 795
791 796 compareworking = False
792 797 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
793 798 compareworking = True
794 799
795 800 if not compareworking:
796 801 # read the manifest from node1 before the manifest from node2,
797 802 # so that we'll hit the manifest cache if we're going through
798 803 # all the revisions in parent->child order.
799 804 mf1 = mfmatches(node1)
800 805
801 806 # are we comparing the working directory?
802 807 if not node2:
803 808 if not wlock:
804 809 try:
805 810 wlock = self.wlock(wait=0)
806 811 except lock.LockException:
807 812 wlock = None
808 813 (lookup, modified, added, removed, deleted, unknown,
809 814 ignored, clean) = self.dirstate.status(files, match,
810 815 list_ignored, list_clean)
811 816
812 817 # are we comparing working dir against its parent?
813 818 if compareworking:
814 819 if lookup:
815 820 # do a full compare of any files that might have changed
816 821 mf2 = mfmatches(self.dirstate.parents()[0])
817 822 for f in lookup:
818 823 if fcmp(f, mf2):
819 824 modified.append(f)
820 825 else:
821 826 clean.append(f)
822 827 if wlock is not None:
823 828 self.dirstate.update([f], "n")
824 829 else:
825 830 # we are comparing working dir against non-parent
826 831 # generate a pseudo-manifest for the working dir
827 832 # XXX: create it in dirstate.py ?
828 833 mf2 = mfmatches(self.dirstate.parents()[0])
829 834 for f in lookup + modified + added:
830 835 mf2[f] = ""
831 836 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
832 837 for f in removed:
833 838 if f in mf2:
834 839 del mf2[f]
835 840 else:
836 841 # we are comparing two revisions
837 842 mf2 = mfmatches(node2)
838 843
839 844 if not compareworking:
840 845 # flush lists from dirstate before comparing manifests
841 846 modified, added, clean = [], [], []
842 847
843 848 # make sure to sort the files so we talk to the disk in a
844 849 # reasonable order
845 850 mf2keys = mf2.keys()
846 851 mf2keys.sort()
847 852 for fn in mf2keys:
848 853 if mf1.has_key(fn):
849 854 if mf1.flags(fn) != mf2.flags(fn) or \
850 855 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
851 856 modified.append(fn)
852 857 elif list_clean:
853 858 clean.append(fn)
854 859 del mf1[fn]
855 860 else:
856 861 added.append(fn)
857 862
858 863 removed = mf1.keys()
859 864
860 865 # sort and return results:
861 866 for l in modified, added, removed, deleted, unknown, ignored, clean:
862 867 l.sort()
863 868 return (modified, added, removed, deleted, unknown, ignored, clean)
864 869
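Unpacking the seven lists returned by status() above (sketch):

    (modified, added, removed, deleted,
     unknown, ignored, clean) = repo.status(list_ignored=True,
                                            list_clean=True)
    for f in modified:
        repo.ui.status("M %s\n" % f)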
865 870 def add(self, list, wlock=None):
866 871 if not wlock:
867 872 wlock = self.wlock()
868 873 for f in list:
869 874 p = self.wjoin(f)
870 875 if not os.path.exists(p):
871 876 self.ui.warn(_("%s does not exist!\n") % f)
872 877 elif not os.path.isfile(p):
873 878 self.ui.warn(_("%s not added: only files supported currently\n")
874 879 % f)
875 880 elif self.dirstate.state(f) in 'an':
876 881 self.ui.warn(_("%s already tracked!\n") % f)
877 882 else:
878 883 self.dirstate.update([f], "a")
879 884
880 885 def forget(self, list, wlock=None):
881 886 if not wlock:
882 887 wlock = self.wlock()
883 888 for f in list:
884 889 if self.dirstate.state(f) not in 'ai':
885 890 self.ui.warn(_("%s not added!\n") % f)
886 891 else:
887 892 self.dirstate.forget([f])
888 893
889 894 def remove(self, list, unlink=False, wlock=None):
890 895 if unlink:
891 896 for f in list:
892 897 try:
893 898 util.unlink(self.wjoin(f))
894 899 except OSError, inst:
895 900 if inst.errno != errno.ENOENT:
896 901 raise
897 902 if not wlock:
898 903 wlock = self.wlock()
899 904 for f in list:
900 905 p = self.wjoin(f)
901 906 if os.path.exists(p):
902 907 self.ui.warn(_("%s still exists!\n") % f)
903 908 elif self.dirstate.state(f) == 'a':
904 909 self.dirstate.forget([f])
905 910 elif f not in self.dirstate:
906 911 self.ui.warn(_("%s not tracked!\n") % f)
907 912 else:
908 913 self.dirstate.update([f], "r")
909 914
910 915 def undelete(self, list, wlock=None):
911 916 p = self.dirstate.parents()[0]
912 917 mn = self.changelog.read(p)[0]
913 918 m = self.manifest.read(mn)
914 919 if not wlock:
915 920 wlock = self.wlock()
916 921 for f in list:
917 922 if self.dirstate.state(f) not in "r":
918 923 self.ui.warn("%s not removed!\n" % f)
919 924 else:
920 925 t = self.file(f).read(m[f])
921 926 self.wwrite(f, t)
922 927 util.set_exec(self.wjoin(f), m.execf(f))
923 928 self.dirstate.update([f], "n")
924 929
925 930 def copy(self, source, dest, wlock=None):
926 931 p = self.wjoin(dest)
927 932 if not os.path.exists(p):
928 933 self.ui.warn(_("%s does not exist!\n") % dest)
929 934 elif not os.path.isfile(p):
930 935 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
931 936 else:
932 937 if not wlock:
933 938 wlock = self.wlock()
934 939 if self.dirstate.state(dest) == '?':
935 940 self.dirstate.update([dest], "a")
936 941 self.dirstate.copy(source, dest)
937 942
938 943 def heads(self, start=None):
939 944 heads = self.changelog.heads(start)
940 945 # sort the output in rev descending order
941 946 heads = [(-self.changelog.rev(h), h) for h in heads]
942 947 heads.sort()
943 948 return [n for (r, n) in heads]
944 949
945 950 # branchlookup returns a dict giving a list of branches for
946 951 # each head. A branch is defined as the tag of a node or
947 952 # the branch of the node's parents. If a node has multiple
948 953 # branch tags, tags are eliminated if they are visible from other
949 954 # branch tags.
950 955 #
951 956 # So, for this graph: a->b->c->d->e
952 957 # \ /
953 958 # aa -----/
954 959 # a has tag 2.6.12
955 960 # d has tag 2.6.13
956 961 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
957 962 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
958 963 # from the list.
959 964 #
960 965 # It is possible that more than one head will have the same branch tag.
961 966 # callers need to check the result for multiple heads under the same
962 967 # branch tag if that is a problem for them (ie checkout of a specific
963 968 # branch).
964 969 #
965 970 # passing in a specific branch will limit the depth of the search
966 971 # through the parents. It won't limit the branches returned in the
967 972 # result though.
968 973 def branchlookup(self, heads=None, branch=None):
969 974 if not heads:
970 975 heads = self.heads()
971 976 headt = [ h for h in heads ]
972 977 chlog = self.changelog
973 978 branches = {}
974 979 merges = []
975 980 seenmerge = {}
976 981
977 982 # traverse the tree once for each head, recording in the branches
978 983 # dict which tags are visible from this head. The branches
979 984 # dict also records which tags are visible from each tag
980 985 # while we traverse.
981 986 while headt or merges:
982 987 if merges:
983 988 n, found = merges.pop()
984 989 visit = [n]
985 990 else:
986 991 h = headt.pop()
987 992 visit = [h]
988 993 found = [h]
989 994 seen = {}
990 995 while visit:
991 996 n = visit.pop()
992 997 if n in seen:
993 998 continue
994 999 pp = chlog.parents(n)
995 1000 tags = self.nodetags(n)
996 1001 if tags:
997 1002 for x in tags:
998 1003 if x == 'tip':
999 1004 continue
1000 1005 for f in found:
1001 1006 branches.setdefault(f, {})[n] = 1
1002 1007 branches.setdefault(n, {})[n] = 1
1003 1008 break
1004 1009 if n not in found:
1005 1010 found.append(n)
1006 1011 if branch in tags:
1007 1012 continue
1008 1013 seen[n] = 1
1009 1014 if pp[1] != nullid and n not in seenmerge:
1010 1015 merges.append((pp[1], [x for x in found]))
1011 1016 seenmerge[n] = 1
1012 1017 if pp[0] != nullid:
1013 1018 visit.append(pp[0])
1014 1019 # traverse the branches dict, eliminating branch tags from each
1015 1020 # head that are visible from another branch tag for that head.
1016 1021 out = {}
1017 1022 viscache = {}
1018 1023 for h in heads:
1019 1024 def visible(node):
1020 1025 if node in viscache:
1021 1026 return viscache[node]
1022 1027 ret = {}
1023 1028 visit = [node]
1024 1029 while visit:
1025 1030 x = visit.pop()
1026 1031 if x in viscache:
1027 1032 ret.update(viscache[x])
1028 1033 elif x not in ret:
1029 1034 ret[x] = 1
1030 1035 if x in branches:
1031 1036 visit[len(visit):] = branches[x].keys()
1032 1037 viscache[node] = ret
1033 1038 return ret
1034 1039 if h not in branches:
1035 1040 continue
1036 1041 # O(n^2), but somewhat limited. This only searches the
1037 1042 # tags visible from a specific head, not all the tags in the
1038 1043 # whole repo.
1039 1044 for b in branches[h]:
1040 1045 vis = False
1041 1046 for bb in branches[h].keys():
1042 1047 if b != bb:
1043 1048 if b in visible(bb):
1044 1049 vis = True
1045 1050 break
1046 1051 if not vis:
1047 1052 l = out.setdefault(h, [])
1048 1053 l[len(l):] = self.nodetags(b)
1049 1054 return out
1050 1055
1051 1056 def branches(self, nodes):
1052 1057 if not nodes:
1053 1058 nodes = [self.changelog.tip()]
1054 1059 b = []
1055 1060 for n in nodes:
1056 1061 t = n
1057 1062 while 1:
1058 1063 p = self.changelog.parents(n)
1059 1064 if p[1] != nullid or p[0] == nullid:
1060 1065 b.append((t, n, p[0], p[1]))
1061 1066 break
1062 1067 n = p[0]
1063 1068 return b
1064 1069
1065 1070 def between(self, pairs):
1066 1071 r = []
1067 1072
1068 1073 for top, bottom in pairs:
1069 1074 n, l, i = top, [], 0
1070 1075 f = 1
1071 1076
1072 1077 while n != bottom:
1073 1078 p = self.changelog.parents(n)[0]
1074 1079 if i == f:
1075 1080 l.append(n)
1076 1081 f = f * 2
1077 1082 n = p
1078 1083 i += 1
1079 1084
1080 1085 r.append(l)
1081 1086
1082 1087 return r
1083 1088
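between() samples each top-bottom range at exponentially growing
distances from the top (1, 2, 4, 8, ...), which is what lets the binary
search in findincoming below converge quickly. A sketch of which
positions get reported for a linear chain of a given depth:

    def samplepositions(depth):
        i, f, out = 0, 1, []
        while i < depth:
            if i == f:
                out.append(i)   # this ancestor would be included in l
                f *= 2
            i += 1
        return out

    # samplepositions(10) -> [1, 2, 4, 8]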
1084 1089 def findincoming(self, remote, base=None, heads=None, force=False):
1085 1090 """Return list of roots of the subsets of missing nodes from remote
1086 1091
1087 1092 If base dict is specified, assume that these nodes and their parents
1088 1093 exist on the remote side and that no child of a node of base exists
1089 1094 in both remote and self.
1090 1095 Furthermore base will be updated to include the nodes that exist
1091 1096 in both self and remote but whose children do not exist in both.
1092 1097 If a list of heads is specified, return only nodes which are heads
1093 1098 or ancestors of these heads.
1094 1099
1095 1100 All the ancestors of base are in self and in remote.
1096 1101 All the descendants of the list returned are missing in self.
1097 1102 (and so we know that the rest of the nodes are missing in remote, see
1098 1103 outgoing)
1099 1104 """
1100 1105 m = self.changelog.nodemap
1101 1106 search = []
1102 1107 fetch = {}
1103 1108 seen = {}
1104 1109 seenbranch = {}
1105 1110 if base == None:
1106 1111 base = {}
1107 1112
1108 1113 if not heads:
1109 1114 heads = remote.heads()
1110 1115
1111 1116 if self.changelog.tip() == nullid:
1112 1117 base[nullid] = 1
1113 1118 if heads != [nullid]:
1114 1119 return [nullid]
1115 1120 return []
1116 1121
1117 1122 # assume we're closer to the tip than the root
1118 1123 # and start by examining the heads
1119 1124 self.ui.status(_("searching for changes\n"))
1120 1125
1121 1126 unknown = []
1122 1127 for h in heads:
1123 1128 if h not in m:
1124 1129 unknown.append(h)
1125 1130 else:
1126 1131 base[h] = 1
1127 1132
1128 1133 if not unknown:
1129 1134 return []
1130 1135
1131 1136 req = dict.fromkeys(unknown)
1132 1137 reqcnt = 0
1133 1138
1134 1139 # search through remote branches
1135 1140 # a 'branch' here is a linear segment of history, with four parts:
1136 1141 # head, root, first parent, second parent
1137 1142 # (a branch always has two parents (or none) by definition)
1138 1143 unknown = remote.branches(unknown)
1139 1144 while unknown:
1140 1145 r = []
1141 1146 while unknown:
1142 1147 n = unknown.pop(0)
1143 1148 if n[0] in seen:
1144 1149 continue
1145 1150
1146 1151 self.ui.debug(_("examining %s:%s\n")
1147 1152 % (short(n[0]), short(n[1])))
1148 1153 if n[0] == nullid: # found the end of the branch
1149 1154 pass
1150 1155 elif n in seenbranch:
1151 1156 self.ui.debug(_("branch already found\n"))
1152 1157 continue
1153 1158 elif n[1] and n[1] in m: # do we know the base?
1154 1159 self.ui.debug(_("found incomplete branch %s:%s\n")
1155 1160 % (short(n[0]), short(n[1])))
1156 1161 search.append(n) # schedule branch range for scanning
1157 1162 seenbranch[n] = 1
1158 1163 else:
1159 1164 if n[1] not in seen and n[1] not in fetch:
1160 1165 if n[2] in m and n[3] in m:
1161 1166 self.ui.debug(_("found new changeset %s\n") %
1162 1167 short(n[1]))
1163 1168 fetch[n[1]] = 1 # earliest unknown
1164 1169 for p in n[2:4]:
1165 1170 if p in m:
1166 1171 base[p] = 1 # latest known
1167 1172
1168 1173 for p in n[2:4]:
1169 1174 if p not in req and p not in m:
1170 1175 r.append(p)
1171 1176 req[p] = 1
1172 1177 seen[n[0]] = 1
1173 1178
1174 1179 if r:
1175 1180 reqcnt += 1
1176 1181 self.ui.debug(_("request %d: %s\n") %
1177 1182 (reqcnt, " ".join(map(short, r))))
1178 1183 for p in xrange(0, len(r), 10):
1179 1184 for b in remote.branches(r[p:p+10]):
1180 1185 self.ui.debug(_("received %s:%s\n") %
1181 1186 (short(b[0]), short(b[1])))
1182 1187 unknown.append(b)
1183 1188
1184 1189 # do binary search on the branches we found
1185 1190 while search:
1186 1191 n = search.pop(0)
1187 1192 reqcnt += 1
1188 1193 l = remote.between([(n[0], n[1])])[0]
1189 1194 l.append(n[1])
1190 1195 p = n[0]
1191 1196 f = 1
1192 1197 for i in l:
1193 1198 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1194 1199 if i in m:
1195 1200 if f <= 2:
1196 1201 self.ui.debug(_("found new branch changeset %s\n") %
1197 1202 short(p))
1198 1203 fetch[p] = 1
1199 1204 base[i] = 1
1200 1205 else:
1201 1206 self.ui.debug(_("narrowed branch search to %s:%s\n")
1202 1207 % (short(p), short(i)))
1203 1208 search.append((p, i))
1204 1209 break
1205 1210 p, f = i, f * 2
1206 1211
1207 1212 # sanity check our fetch list
1208 1213 for f in fetch.keys():
1209 1214 if f in m:
1210 1215 raise repo.RepoError(_("already have changeset ") + short(f))
1211 1216
1212 1217 if base.keys() == [nullid]:
1213 1218 if force:
1214 1219 self.ui.warn(_("warning: repository is unrelated\n"))
1215 1220 else:
1216 1221 raise util.Abort(_("repository is unrelated"))
1217 1222
1218 1223 self.ui.debug(_("found new changesets starting at ") +
1219 1224 " ".join([short(f) for f in fetch]) + "\n")
1220 1225
1221 1226 self.ui.debug(_("%d total queries\n") % reqcnt)
1222 1227
1223 1228 return fetch.keys()
1224 1229
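A hedged usage sketch (mirroring pull() below): discovery fills `base` with
nodes common to both sides and returns the roots of the missing subsets,
which are then requested from the remote:

    base = {}
    fetch = repo.findincoming(remote, base=base)
    if fetch:
        cg = remote.changegroup(fetch, 'pull')     # as in pull() below
        repo.addchangegroup(cg, 'pull', remote.url())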
1225 1230 def findoutgoing(self, remote, base=None, heads=None, force=False):
1226 1231 """Return list of nodes that are roots of subsets not in remote
1227 1232
1228 1233 If base dict is specified, assume that these nodes and their parents
1229 1234 exist on the remote side.
1230 1235 If a list of heads is specified, return only nodes which are heads
1231 1236 or ancestors of these heads, and return a second element which
1232 1237 contains all remote heads which get new children.
1233 1238 """
1234 1239 if base is None:
1235 1240 base = {}
1236 1241 self.findincoming(remote, base, heads, force=force)
1237 1242
1238 1243 self.ui.debug(_("common changesets up to ")
1239 1244 + " ".join(map(short, base.keys())) + "\n")
1240 1245
1241 1246 remain = dict.fromkeys(self.changelog.nodemap)
1242 1247
1243 1248 # prune everything remote has from the tree
1244 1249 del remain[nullid]
1245 1250 remove = base.keys()
1246 1251 while remove:
1247 1252 n = remove.pop(0)
1248 1253 if n in remain:
1249 1254 del remain[n]
1250 1255 for p in self.changelog.parents(n):
1251 1256 remove.append(p)
1252 1257
1253 1258 # find every node whose parents have been pruned
1254 1259 subset = []
1255 1260 # find every remote head that will get new children
1256 1261 updated_heads = {}
1257 1262 for n in remain:
1258 1263 p1, p2 = self.changelog.parents(n)
1259 1264 if p1 not in remain and p2 not in remain:
1260 1265 subset.append(n)
1261 1266 if heads:
1262 1267 if p1 in heads:
1263 1268 updated_heads[p1] = True
1264 1269 if p2 in heads:
1265 1270 updated_heads[p2] = True
1266 1271
1267 1272 # this is the set of all roots we have to push
1268 1273 if heads:
1269 1274 return subset, updated_heads.keys()
1270 1275 else:
1271 1276 return subset
1272 1277
1273 1278 def pull(self, remote, heads=None, force=False, lock=None):
1274 1279 mylock = False
1275 1280 if not lock:
1276 1281 lock = self.lock()
1277 1282 mylock = True
1278 1283
1279 1284 try:
1280 1285 fetch = self.findincoming(remote, force=force)
1281 1286 if fetch == [nullid]:
1282 1287 self.ui.status(_("requesting all changes\n"))
1283 1288
1284 1289 if not fetch:
1285 1290 self.ui.status(_("no changes found\n"))
1286 1291 return 0
1287 1292
1288 1293 if heads is None:
1289 1294 cg = remote.changegroup(fetch, 'pull')
1290 1295 else:
1291 1296 if 'changegroupsubset' not in remote.capabilities:
1292 1297 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1293 1298 cg = remote.changegroupsubset(fetch, heads, 'pull')
1294 1299 return self.addchangegroup(cg, 'pull', remote.url())
1295 1300 finally:
1296 1301 if mylock:
1297 1302 lock.release()
1298 1303
1299 1304 def push(self, remote, force=False, revs=None):
1300 1305 # there are two ways to push to remote repo:
1301 1306 #
1302 1307 # addchangegroup assumes local user can lock remote
1303 1308 # repo (local filesystem, old ssh servers).
1304 1309 #
1305 1310 # unbundle assumes local user cannot lock remote repo (new ssh
1306 1311 # servers, http servers).
1307 1312
1308 1313 if remote.capable('unbundle'):
1309 1314 return self.push_unbundle(remote, force, revs)
1310 1315 return self.push_addchangegroup(remote, force, revs)
1311 1316
1312 1317 def prepush(self, remote, force, revs):
1313 1318 base = {}
1314 1319 remote_heads = remote.heads()
1315 1320 inc = self.findincoming(remote, base, remote_heads, force=force)
1316 1321
1317 1322 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1318 1323 if revs is not None:
1319 1324 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1320 1325 else:
1321 1326 bases, heads = update, self.changelog.heads()
1322 1327
1323 1328 if not bases:
1324 1329 self.ui.status(_("no changes found\n"))
1325 1330 return None, 1
1326 1331 elif not force:
1327 1332 # check if we're creating new remote heads
1328 1333 # to be a remote head after push, node must be either
1329 1334 # - unknown locally
1330 1335 # - a local outgoing head descended from update
1331 1336 # - a remote head that's known locally and not
1332 1337 # ancestral to an outgoing head
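            # Worked example (illustrative): if the remote's only head A is
            # an ancestor of our single outgoing head B, newheads stays at
            # one and the push proceeds.  If instead our head C does not
            # descend from A, A is added back to newheads, len(newheads)
            # exceeds len(remote_heads), and the warning below fires.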
1333 1338
1334 1339 warn = 0
1335 1340
1336 1341 if remote_heads == [nullid]:
1337 1342 warn = 0
1338 1343 elif not revs and len(heads) > len(remote_heads):
1339 1344 warn = 1
1340 1345 else:
1341 1346 newheads = list(heads)
1342 1347 for r in remote_heads:
1343 1348 if r in self.changelog.nodemap:
1344 1349 desc = self.changelog.heads(r)
1345 1350 l = [h for h in heads if h in desc]
1346 1351 if not l:
1347 1352 newheads.append(r)
1348 1353 else:
1349 1354 newheads.append(r)
1350 1355 if len(newheads) > len(remote_heads):
1351 1356 warn = 1
1352 1357
1353 1358 if warn:
1354 1359 self.ui.warn(_("abort: push creates new remote heads!\n"))
1355 1360 self.ui.status(_("(did you forget to merge?"
1356 1361 " use push -f to force)\n"))
1357 1362 return None, 1
1358 1363 elif inc:
1359 1364 self.ui.warn(_("note: unsynced remote changes!\n"))
1360 1365
1361 1366
1362 1367 if revs is None:
1363 1368 cg = self.changegroup(update, 'push')
1364 1369 else:
1365 1370 cg = self.changegroupsubset(update, revs, 'push')
1366 1371 return cg, remote_heads
1367 1372
1368 1373 def push_addchangegroup(self, remote, force, revs):
1369 1374 lock = remote.lock()
1370 1375
1371 1376 ret = self.prepush(remote, force, revs)
1372 1377 if ret[0] is not None:
1373 1378 cg, remote_heads = ret
1374 1379 return remote.addchangegroup(cg, 'push', self.url())
1375 1380 return ret[1]
1376 1381
1377 1382 def push_unbundle(self, remote, force, revs):
1378 1383 # local repo finds heads on server, finds out what revs it
1379 1384 # must push. once revs transferred, if server finds it has
1380 1385 # different heads (someone else won commit/push race), server
1381 1386 # aborts.
1382 1387
1383 1388 ret = self.prepush(remote, force, revs)
1384 1389 if ret[0] is not None:
1385 1390 cg, remote_heads = ret
1386 1391 if force: remote_heads = ['force']
1387 1392 return remote.unbundle(cg, remote_heads, 'push')
1388 1393 return ret[1]
1389 1394
1390 1395 def changegroupinfo(self, nodes):
1391 1396 self.ui.note(_("%d changesets found\n") % len(nodes))
1392 1397 if self.ui.debugflag:
1393 1398 self.ui.debug(_("List of changesets:\n"))
1394 1399 for node in nodes:
1395 1400 self.ui.debug("%s\n" % hex(node))
1396 1401
1397 1402 def changegroupsubset(self, bases, heads, source):
1398 1403 """This function generates a changegroup consisting of all the nodes
1399 1404 that are descendants of any of the bases, and ancestors of any of
1400 1405 the heads.
1401 1406
1402 1407 It is fairly complex as determining which filenodes and which
1403 1408 manifest nodes need to be included for the changeset to be complete
1404 1409 is non-trivial.
1405 1410
1406 1411 Another wrinkle is doing the reverse, figuring out which changeset in
1407 1412 the changegroup a particular filenode or manifestnode belongs to."""
1408 1413
1409 1414 self.hook('preoutgoing', throw=True, source=source)
1410 1415
1411 1416 # Set up some initial variables
1412 1417 # Make it easy to refer to self.changelog
1413 1418 cl = self.changelog
1414 1419 # msng is short for missing - compute the list of changesets in this
1415 1420 # changegroup.
1416 1421 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1417 1422 self.changegroupinfo(msng_cl_lst)
1418 1423 # Some bases may turn out to be superfluous, and some heads may be
1419 1424 # too. nodesbetween will return the minimal set of bases and heads
1420 1425 # necessary to re-create the changegroup.
1421 1426
1422 1427 # Known heads are the list of heads that it is assumed the recipient
1423 1428 # of this changegroup will know about.
1424 1429 knownheads = {}
1425 1430 # We assume that all parents of bases are known heads.
1426 1431 for n in bases:
1427 1432 for p in cl.parents(n):
1428 1433 if p != nullid:
1429 1434 knownheads[p] = 1
1430 1435 knownheads = knownheads.keys()
1431 1436 if knownheads:
1432 1437 # Now that we know what heads are known, we can compute which
1433 1438 # changesets are known. The recipient must know about all
1434 1439 # changesets required to reach the known heads from the null
1435 1440 # changeset.
1436 1441 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1437 1442 junk = None
1438 1443 # Transform the list into an ersatz set.
1439 1444 has_cl_set = dict.fromkeys(has_cl_set)
1440 1445 else:
1441 1446 # If there were no known heads, the recipient cannot be assumed to
1442 1447 # know about any changesets.
1443 1448 has_cl_set = {}
1444 1449
1445 1450 # Make it easy to refer to self.manifest
1446 1451 mnfst = self.manifest
1447 1452 # We don't know which manifests are missing yet
1448 1453 msng_mnfst_set = {}
1449 1454 # Nor do we know which filenodes are missing.
1450 1455 msng_filenode_set = {}
1451 1456
1452 1457 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1453 1458 junk = None
1454 1459
1455 1460 # A changeset always belongs to itself, so the changenode lookup
1456 1461 # function for a changenode is identity.
1457 1462 def identity(x):
1458 1463 return x
1459 1464
1460 1465 # A function generating function. Sets up an environment for the
1461 1466 # inner function.
1462 1467 def cmp_by_rev_func(revlog):
1463 1468 # Compare two nodes by their revision number in the environment's
1464 1469 # revision history. Since the revision number both represents the
1465 1470 # most efficient order to read the nodes in, and represents a
1466 1471 # topological sorting of the nodes, this function is often useful.
1467 1472 def cmp_by_rev(a, b):
1468 1473 return cmp(revlog.rev(a), revlog.rev(b))
1469 1474 return cmp_by_rev
1470 1475
1471 1476 # If we determine that a particular file or manifest node must be a
1472 1477 # node that the recipient of the changegroup will already have, we can
1473 1478 # also assume the recipient will have all the parents. This function
1474 1479 # prunes them from the set of missing nodes.
1475 1480 def prune_parents(revlog, hasset, msngset):
1476 1481 haslst = hasset.keys()
1477 1482 haslst.sort(cmp_by_rev_func(revlog))
1478 1483 for node in haslst:
1479 1484 parentlst = [p for p in revlog.parents(node) if p != nullid]
1480 1485 while parentlst:
1481 1486 n = parentlst.pop()
1482 1487 if n not in hasset:
1483 1488 hasset[n] = 1
1484 1489 p = [p for p in revlog.parents(n) if p != nullid]
1485 1490 parentlst.extend(p)
1486 1491 for n in hasset:
1487 1492 msngset.pop(n, None)
1488 1493
1489 1494 # This is a function generating function used to set up an environment
1490 1495 # for the inner function to execute in.
1491 1496 def manifest_and_file_collector(changedfileset):
1492 1497 # This is an information gathering function that gathers
1493 1498 # information from each changeset node that goes out as part of
1494 1499 # the changegroup. The information gathered is a list of which
1495 1500 # manifest nodes are potentially required (the recipient may
1496 1501 # already have them) and a total list of all files which were
1497 1502 # changed in any changeset in the changegroup.
1498 1503 #
1499 1504 # We also remember the first changenode we saw any manifest
1500 1505 # referenced by so we can later determine which changenode 'owns'
1501 1506 # the manifest.
1502 1507 def collect_manifests_and_files(clnode):
1503 1508 c = cl.read(clnode)
1504 1509 for f in c[3]:
1505 1510 # Make sure we keep a single shared instance of each
1506 1511 # filename string (cheap interning, saves memory).
1507 1512 changedfileset.setdefault(f, f)
1508 1513 msng_mnfst_set.setdefault(c[0], clnode)
1509 1514 return collect_manifests_and_files
1510 1515
1511 1516 # Figure out which manifest nodes (of the ones we think might be part
1512 1517 # of the changegroup) the recipient must know about and remove them
1513 1518 # from the changegroup.
1514 1519 def prune_manifests():
1515 1520 has_mnfst_set = {}
1516 1521 for n in msng_mnfst_set:
1517 1522 # If a 'missing' manifest thinks it belongs to a changenode
1518 1523 # the recipient is assumed to have, obviously the recipient
1519 1524 # must have that manifest.
1520 1525 linknode = cl.node(mnfst.linkrev(n))
1521 1526 if linknode in has_cl_set:
1522 1527 has_mnfst_set[n] = 1
1523 1528 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1524 1529
1525 1530 # Use the information collected in collect_manifests_and_files to say
1526 1531 # which changenode any manifestnode belongs to.
1527 1532 def lookup_manifest_link(mnfstnode):
1528 1533 return msng_mnfst_set[mnfstnode]
1529 1534
1530 1535 # A function generating function that sets up the initial environment
1531 1536 # for the inner function.
1532 1537 def filenode_collector(changedfiles):
1533 1538 next_rev = [0]
1534 1539 # This gathers information from each manifestnode included in the
1535 1540 # changegroup about which filenodes the manifest node references
1536 1541 # so we can include those in the changegroup too.
1537 1542 #
1538 1543 # It also remembers which changenode each filenode belongs to. It
1539 1544 # does this by assuming that a filenode belongs to the changenode
1540 1545 # the first manifest that references it belongs to.
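        # An illustrative delta line as parsed below: file name and hex
        # filenode separated by a NUL byte, e.g.
        #     "src/foo.c\x00<40 hex digits>"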
1541 1546 def collect_msng_filenodes(mnfstnode):
1542 1547 r = mnfst.rev(mnfstnode)
1543 1548 if r == next_rev[0]:
1544 1549 # If the last rev we looked at was the one just previous,
1545 1550 # we only need to see a diff.
1546 1551 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1547 1552 # For each line in the delta
1548 1553 for dline in delta.splitlines():
1549 1554 # get the filename and filenode for that line
1550 1555 f, fnode = dline.split('\0')
1551 1556 fnode = bin(fnode[:40])
1552 1557 f = changedfiles.get(f, None)
1553 1558 # And if the file is in the list of files we care
1554 1559 # about.
1555 1560 if f is not None:
1556 1561 # Get the changenode this manifest belongs to
1557 1562 clnode = msng_mnfst_set[mnfstnode]
1558 1563 # Create the set of filenodes for the file if
1559 1564 # there isn't one already.
1560 1565 ndset = msng_filenode_set.setdefault(f, {})
1561 1566 # And set the filenode's changelog node to the
1562 1567 # manifest's if it hasn't been set already.
1563 1568 ndset.setdefault(fnode, clnode)
1564 1569 else:
1565 1570 # Otherwise we need a full manifest.
1566 1571 m = mnfst.read(mnfstnode)
1567 1572 # For every file we care about.
1568 1573 for f in changedfiles:
1569 1574 fnode = m.get(f, None)
1570 1575 # If it's in the manifest
1571 1576 if fnode is not None:
1572 1577 # See comments above.
1573 1578 clnode = msng_mnfst_set[mnfstnode]
1574 1579 ndset = msng_filenode_set.setdefault(f, {})
1575 1580 ndset.setdefault(fnode, clnode)
1576 1581 # Remember the revision we hope to see next.
1577 1582 next_rev[0] = r + 1
1578 1583 return collect_msng_filenodes
1579 1584
1580 1585 # We have a list of filenodes we think we need for a file, let's remove
1581 1586 # all those we know the recipient must have.
1582 1587 def prune_filenodes(f, filerevlog):
1583 1588 msngset = msng_filenode_set[f]
1584 1589 hasset = {}
1585 1590 # If a 'missing' filenode thinks it belongs to a changenode we
1586 1591 # assume the recipient must have, then the recipient must have
1587 1592 # that filenode.
1588 1593 for n in msngset:
1589 1594 clnode = cl.node(filerevlog.linkrev(n))
1590 1595 if clnode in has_cl_set:
1591 1596 hasset[n] = 1
1592 1597 prune_parents(filerevlog, hasset, msngset)
1593 1598
1594 1599 # A function generating function that sets up a context for the
1595 1600 # inner function.
1596 1601 def lookup_filenode_link_func(fname):
1597 1602 msngset = msng_filenode_set[fname]
1598 1603 # Look up the changenode the filenode belongs to.
1599 1604 def lookup_filenode_link(fnode):
1600 1605 return msngset[fnode]
1601 1606 return lookup_filenode_link
1602 1607
1603 1608 # Now that we have all these utility functions to help out and
1604 1609 # logically divide up the task, generate the group.
1605 1610 def gengroup():
1606 1611 # The set of changed files starts empty.
1607 1612 changedfiles = {}
1608 1613 # Create a changenode group generator that will call our functions
1609 1614 # back to lookup the owning changenode and collect information.
1610 1615 group = cl.group(msng_cl_lst, identity,
1611 1616 manifest_and_file_collector(changedfiles))
1612 1617 for chnk in group:
1613 1618 yield chnk
1614 1619
1615 1620 # The list of manifests has been collected by the generator
1616 1621 # calling our functions back.
1617 1622 prune_manifests()
1618 1623 msng_mnfst_lst = msng_mnfst_set.keys()
1619 1624 # Sort the manifestnodes by revision number.
1620 1625 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1621 1626 # Create a generator for the manifestnodes that calls our lookup
1622 1627 # and data collection functions back.
1623 1628 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1624 1629 filenode_collector(changedfiles))
1625 1630 for chnk in group:
1626 1631 yield chnk
1627 1632
1628 1633 # These are no longer needed, dereference and toss the memory for
1629 1634 # them.
1630 1635 msng_mnfst_lst = None
1631 1636 msng_mnfst_set.clear()
1632 1637
1633 1638 changedfiles = changedfiles.keys()
1634 1639 changedfiles.sort()
1635 1640 # Go through all our files in order sorted by name.
1636 1641 for fname in changedfiles:
1637 1642 filerevlog = self.file(fname)
1638 1643 # Toss out the filenodes that the recipient isn't really
1639 1644 # missing.
1640 1645 if fname in msng_filenode_set:
1641 1646 prune_filenodes(fname, filerevlog)
1642 1647 msng_filenode_lst = msng_filenode_set[fname].keys()
1643 1648 else:
1644 1649 msng_filenode_lst = []
1645 1650 # If any filenodes are left, generate the group for them,
1646 1651 # otherwise don't bother.
1647 1652 if len(msng_filenode_lst) > 0:
1648 1653 yield changegroup.genchunk(fname)
1649 1654 # Sort the filenodes by their revision #
1650 1655 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1651 1656 # Create a group generator and only pass in a changenode
1652 1657 # lookup function as we need to collect no information
1653 1658 # from filenodes.
1654 1659 group = filerevlog.group(msng_filenode_lst,
1655 1660 lookup_filenode_link_func(fname))
1656 1661 for chnk in group:
1657 1662 yield chnk
1658 1663 if fname in msng_filenode_set:
1659 1664 # Don't need this anymore, toss it to free memory.
1660 1665 del msng_filenode_set[fname]
1661 1666 # Signal that no more groups are left.
1662 1667 yield changegroup.closechunk()
1663 1668
1664 1669 if msng_cl_lst:
1665 1670 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1666 1671
1667 1672 return util.chunkbuffer(gengroup())
1668 1673
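A hedged usage note: this method backs partial pulls and pushes. The pull
path above does

    cg = remote.changegroupsubset(fetch, heads, 'pull')
    self.addchangegroup(cg, 'pull', remote.url())

and prepush() calls self.changegroupsubset(update, revs, 'push').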
1669 1674 def changegroup(self, basenodes, source):
1670 1675 """Generate a changegroup of all nodes that we have that a recipient
1671 1676 doesn't.
1672 1677
1673 1678 This is much easier than the previous function as we can assume that
1674 1679 the recipient has any changenode we aren't sending them."""
1675 1680
1676 1681 self.hook('preoutgoing', throw=True, source=source)
1677 1682
1678 1683 cl = self.changelog
1679 1684 nodes = cl.nodesbetween(basenodes, None)[0]
1680 1685 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1681 1686 self.changegroupinfo(nodes)
1682 1687
1683 1688 def identity(x):
1684 1689 return x
1685 1690
1686 1691 def gennodelst(revlog):
1687 1692 for r in xrange(0, revlog.count()):
1688 1693 n = revlog.node(r)
1689 1694 if revlog.linkrev(n) in revset:
1690 1695 yield n
1691 1696
1692 1697 def changed_file_collector(changedfileset):
1693 1698 def collect_changed_files(clnode):
1694 1699 c = cl.read(clnode)
1695 1700 for fname in c[3]:
1696 1701 changedfileset[fname] = 1
1697 1702 return collect_changed_files
1698 1703
1699 1704 def lookuprevlink_func(revlog):
1700 1705 def lookuprevlink(n):
1701 1706 return cl.node(revlog.linkrev(n))
1702 1707 return lookuprevlink
1703 1708
1704 1709 def gengroup():
1705 1710 # construct a list of all changed files
1706 1711 changedfiles = {}
1707 1712
1708 1713 for chnk in cl.group(nodes, identity,
1709 1714 changed_file_collector(changedfiles)):
1710 1715 yield chnk
1711 1716 changedfiles = changedfiles.keys()
1712 1717 changedfiles.sort()
1713 1718
1714 1719 mnfst = self.manifest
1715 1720 nodeiter = gennodelst(mnfst)
1716 1721 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1717 1722 yield chnk
1718 1723
1719 1724 for fname in changedfiles:
1720 1725 filerevlog = self.file(fname)
1721 1726 nodeiter = gennodelst(filerevlog)
1722 1727 nodeiter = list(nodeiter)
1723 1728 if nodeiter:
1724 1729 yield changegroup.genchunk(fname)
1725 1730 lookup = lookuprevlink_func(filerevlog)
1726 1731 for chnk in filerevlog.group(nodeiter, lookup):
1727 1732 yield chnk
1728 1733
1729 1734 yield changegroup.closechunk()
1730 1735
1731 1736 if nodes:
1732 1737 self.hook('outgoing', node=hex(nodes[0]), source=source)
1733 1738
1734 1739 return util.chunkbuffer(gengroup())
1735 1740
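# Stream layout produced by gengroup() above and consumed by addchangegroup()
# below: the changelog group's chunks, then the manifest group's chunks, then
# for each changed file a chunk carrying the file name followed by that
# file's chunks, and finally a closechunk() terminating the list of files.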
1736 1741 def addchangegroup(self, source, srctype, url):
1737 1742 """add changegroup to repo.
1738 1743 returns number of heads modified or added + 1."""
1739 1744
1740 1745 def csmap(x):
1741 1746 self.ui.debug(_("add changeset %s\n") % short(x))
1742 1747 return cl.count()
1743 1748
1744 1749 def revmap(x):
1745 1750 return cl.rev(x)
1746 1751
1747 1752 if not source:
1748 1753 return 0
1749 1754
1750 1755 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1751 1756
1752 1757 changesets = files = revisions = 0
1753 1758
1754 1759 tr = self.transaction()
1755 1760
1756 1761 # write changelog data to temp files so concurrent readers will not see
1757 1762 # an inconsistent view
1758 1763 cl = None
1759 1764 try:
1760 1765 cl = appendfile.appendchangelog(self.sopener,
1761 1766 self.changelog.version)
1762 1767
1763 1768 oldheads = len(cl.heads())
1764 1769
1765 1770 # pull off the changeset group
1766 1771 self.ui.status(_("adding changesets\n"))
1767 1772 cor = cl.count() - 1
1768 1773 chunkiter = changegroup.chunkiter(source)
1769 1774 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1770 1775 raise util.Abort(_("received changelog group is empty"))
1771 1776 cnr = cl.count() - 1
1772 1777 changesets = cnr - cor
1773 1778
1774 1779 # pull off the manifest group
1775 1780 self.ui.status(_("adding manifests\n"))
1776 1781 chunkiter = changegroup.chunkiter(source)
1777 1782 # no need to check for empty manifest group here:
1778 1783 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1779 1784 # no new manifest will be created and the manifest group will
1780 1785 # be empty during the pull
1781 1786 self.manifest.addgroup(chunkiter, revmap, tr)
1782 1787
1783 1788 # process the files
1784 1789 self.ui.status(_("adding file changes\n"))
1785 1790 while 1:
1786 1791 f = changegroup.getchunk(source)
1787 1792 if not f:
1788 1793 break
1789 1794 self.ui.debug(_("adding %s revisions\n") % f)
1790 1795 fl = self.file(f)
1791 1796 o = fl.count()
1792 1797 chunkiter = changegroup.chunkiter(source)
1793 1798 if fl.addgroup(chunkiter, revmap, tr) is None:
1794 1799 raise util.Abort(_("received file revlog group is empty"))
1795 1800 revisions += fl.count() - o
1796 1801 files += 1
1797 1802
1798 1803 cl.writedata()
1799 1804 finally:
1800 1805 if cl:
1801 1806 cl.cleanup()
1802 1807
1803 1808 # make changelog see real files again
1804 1809 self.changelog = changelog.changelog(self.sopener,
1805 1810 self.changelog.version)
1806 1811 self.changelog.checkinlinesize(tr)
1807 1812
1808 1813 newheads = len(self.changelog.heads())
1809 1814 heads = ""
1810 1815 if oldheads and newheads != oldheads:
1811 1816 heads = _(" (%+d heads)") % (newheads - oldheads)
1812 1817
1813 1818 self.ui.status(_("added %d changesets"
1814 1819 " with %d changes to %d files%s\n")
1815 1820 % (changesets, revisions, files, heads))
1816 1821
1817 1822 if changesets > 0:
1818 1823 self.hook('pretxnchangegroup', throw=True,
1819 1824 node=hex(self.changelog.node(cor+1)), source=srctype,
1820 1825 url=url)
1821 1826
1822 1827 tr.close()
1823 1828
1824 1829 if changesets > 0:
1825 1830 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1826 1831 source=srctype, url=url)
1827 1832
1828 1833 for i in xrange(cor + 1, cnr + 1):
1829 1834 self.hook("incoming", node=hex(self.changelog.node(i)),
1830 1835 source=srctype, url=url)
1831 1836
1832 1837 return newheads - oldheads + 1
1833 1838
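# Hook ordering in addchangegroup() above: 'prechangegroup' (throw=True, may
# abort the whole operation), then 'pretxnchangegroup' (fires before
# tr.close(), so a failing hook rolls the transaction back), then
# 'changegroup' and one 'incoming' hook per added changeset.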
1834 1839
1835 1840 def stream_in(self, remote):
1836 1841 fp = remote.stream_out()
1837 1842 l = fp.readline()
1838 1843 try:
1839 1844 resp = int(l)
1840 1845 except ValueError:
1841 1846 raise util.UnexpectedOutput(
1842 1847 _('Unexpected response from remote server:'), l)
1843 1848 if resp == 1:
1844 1849 raise util.Abort(_('operation forbidden by server'))
1845 1850 elif resp == 2:
1846 1851 raise util.Abort(_('locking the remote repository failed'))
1847 1852 elif resp != 0:
1848 1853 raise util.Abort(_('the server sent an unknown error code'))
1849 1854 self.ui.status(_('streaming all changes\n'))
1850 1855 l = fp.readline()
1851 1856 try:
1852 1857 total_files, total_bytes = map(int, l.split(' ', 1))
1853 1858 except (ValueError, TypeError):
1854 1859 raise util.UnexpectedOutput(
1855 1860 _('Unexpected response from remote server:'), l)
1856 1861 self.ui.status(_('%d files to transfer, %s of data\n') %
1857 1862 (total_files, util.bytecount(total_bytes)))
1858 1863 start = time.time()
1859 1864 for i in xrange(total_files):
1860 1865 # XXX doesn't support '\n' or '\r' in filenames
1861 1866 l = fp.readline()
1862 1867 try:
1863 1868 name, size = l.split('\0', 1)
1864 1869 size = int(size)
1865 1870 except (ValueError, TypeError):
1866 1871 raise util.UnexpectedOutput(
1867 1872 _('Unexpected response from remote server:'), l)
1868 1873 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1869 1874 ofp = self.sopener(name, 'w')
1870 1875 for chunk in util.filechunkiter(fp, limit=size):
1871 1876 ofp.write(chunk)
1872 1877 ofp.close()
1873 1878 elapsed = time.time() - start
1874 1879 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1875 1880 (util.bytecount(total_bytes), elapsed,
1876 1881 util.bytecount(total_bytes / elapsed)))
1877 1882 self.reload()
1878 1883 return len(self.heads()) + 1
1879 1884
1880 1885 def clone(self, remote, heads=[], stream=False):
1881 1886 '''clone remote repository.
1882 1887
1883 1888 keyword arguments:
1884 1889 heads: list of revs to clone (forces use of pull)
1885 1890 stream: use streaming clone if possible'''
1886 1891
1887 1892 # now, all clients that can request uncompressed clones can
1888 1893 # read repo formats supported by all servers that can serve
1889 1894 # them.
1890 1895
1891 1896 # if revlog format changes, client will have to check version
1892 1897 # and format flags on "stream" capability, and use
1893 1898 # uncompressed only if compatible.
1894 1899
1895 1900 if stream and not heads and remote.capable('stream'):
1896 1901 return self.stream_in(remote)
1897 1902 return self.pull(remote, heads)
1898 1903
1899 1904 # used to avoid circular references so destructors work
1900 def aftertrans(base):
1901 p = base
1905 def aftertrans(files):
1906 renamefiles = [tuple(t) for t in files]
1902 1907 def a():
1903 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1904 util.rename(os.path.join(p, "journal.dirstate"),
1905 os.path.join(p, "undo.dirstate"))
1908 for src, dest in renamefiles:
1909 util.rename(src, dest)
1906 1910 return a
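# e.g. (illustrative, matching the old behaviour removed above):
#   aftertrans([(os.path.join(p, "journal"), os.path.join(p, "undo")),
#               (os.path.join(p, "journal.dirstate"),
#                os.path.join(p, "undo.dirstate"))])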
1907 1911
1908 1912 def instance(ui, path, create):
1909 1913 return localrepository(ui, util.drop_scheme('file', path), create)
1910 1914
1911 1915 def islocal(path):
1912 1916 return True
@@ -1,65 +1,66 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms
8 8 # of the GNU General Public License, incorporated herein by reference.
9 9
10 10 from demandload import *
11 11 from i18n import gettext as _
12 12 demandload(globals(), "changelog filelog httprangereader")
13 13 demandload(globals(), "localrepo manifest os urllib urllib2 util")
14 14
15 15 class rangereader(httprangereader.httprangereader):
16 16 def read(self, size=None):
17 17 try:
18 18 return httprangereader.httprangereader.read(self, size)
19 19 except urllib2.HTTPError, inst:
20 20 raise IOError(None, inst)
21 21 except urllib2.URLError, inst:
22 22 raise IOError(None, inst.reason[1])
23 23
24 24 def opener(base):
25 25 """return a function that opens files over http"""
26 26 p = base
27 27 def o(path, mode="r"):
28 28 f = os.path.join(p, urllib.quote(path))
29 29 return rangereader(f)
30 30 return o
31 31
32 32 class statichttprepository(localrepo.localrepository):
33 33 def __init__(self, ui, path):
34 34 self._url = path
35 35 self.path = (path + "/.hg")
36 self.spath = self.path
36 37 self.ui = ui
37 38 self.revlogversion = 0
38 39 self.opener = opener(self.path)
39 self.sopener = opener(self.path)
40 self.manifest = manifest.manifest(self.opener)
41 self.changelog = changelog.changelog(self.opener)
40 self.sopener = opener(self.spath)
41 self.manifest = manifest.manifest(self.sopener)
42 self.changelog = changelog.changelog(self.sopener)
42 43 self.tagscache = None
43 44 self.nodetagscache = None
44 45 self.encodepats = None
45 46 self.decodepats = None
46 47
47 48 def url(self):
48 49 return 'static-' + self._url
49 50
50 51 def dev(self):
51 52 return -1
52 53
53 54 def local(self):
54 55 return False
55 56
56 57 def instance(ui, path, create):
57 58 if create:
58 59 raise util.Abort(_('cannot create new static-http repository'))
59 60 if path.startswith('old-http:'):
60 61 ui.warn(_("old-http:// syntax is deprecated, "
61 62 "please use static-http:// instead\n"))
62 63 path = path[4:]
63 64 else:
64 65 path = path[7:]
65 66 return statichttprepository(ui, path)
@@ -1,95 +1,95 b''
1 1 # streamclone.py - streaming clone server support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from demandload import demandload
9 9 from i18n import gettext as _
10 10 demandload(globals(), "os stat util lock")
11 11
12 12 # if server supports streaming clone, it advertises "stream"
13 13 # capability with value that is version+flags of repo it is serving.
14 14 # client only streams if it can read that repo format.
15 15
16 16 def walkrepo(root):
17 17 '''iterate over metadata files in repository.
18 18 walk in natural (sorted) order.
19 19 yields 2-tuples: name of .d or .i file, size of file.'''
20 20
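    # Resulting order (derived from the code below): everything under data/
    # first, then the repo-level .i/.d files in reverse sorted order, which
    # yields 00manifest.i before 00changelog.i.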
21 21 strip_count = len(root) + len(os.sep)
22 22 def walk(path, recurse):
23 23 ents = os.listdir(path)
24 24 ents.sort()
25 25 for e in ents:
26 26 pe = os.path.join(path, e)
27 27 st = os.lstat(pe)
28 28 if stat.S_ISDIR(st.st_mode):
29 29 if recurse:
30 30 for x in walk(pe, True):
31 31 yield x
32 32 else:
33 33 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
34 34 continue
35 35 sfx = e[-2:]
36 36 if sfx in ('.d', '.i'):
37 37 yield pe[strip_count:], st.st_size
38 38 # write file data first
39 39 for x in walk(os.path.join(root, 'data'), True):
40 40 yield x
41 41 # write manifest before changelog
42 42 meta = list(walk(root, False))
43 43 meta.sort()
44 44 meta.reverse()
45 45 for x in meta:
46 46 yield x
47 47
48 48 # stream file format is simple.
49 49 #
50 50 # server writes out line that says how many files, how many total
51 51 # bytes. separator is ascii space, byte counts are strings.
52 52 #
53 53 # then for each file:
54 54 #
55 55 # server writes out line that says file name, how many bytes in
56 56 # file. separator is ascii nul, byte count is string.
57 57 #
58 58 # server writes out raw file data.
59 59
60 60 def stream_out(repo, fileobj):
61 61 '''stream out all metadata files in repository.
62 62 writes to file-like object, must support write() and optional flush().'''
63 63
64 64 if not repo.ui.configbool('server', 'uncompressed'):
65 65 fileobj.write('1\n')
66 66 return
67 67
68 68 # get a consistent snapshot of the repo: lock only during the scan, so
69 69 # the lock is not needed while we stream and commits can happen meanwhile.
70 70 try:
71 71 repolock = repo.lock()
72 72 except (lock.LockHeld, lock.LockUnavailable), inst:
73 73 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
74 74 fileobj.write('2\n')
75 75 return
76 76
77 77 fileobj.write('0\n')
78 78 repo.ui.debug('scanning\n')
79 79 entries = []
80 80 total_bytes = 0
81 for name, size in walkrepo(repo.path):
81 for name, size in walkrepo(repo.spath):
82 82 entries.append((name, size))
83 83 total_bytes += size
84 84 repolock.release()
85 85
86 86 repo.ui.debug('%d files, %d bytes to transfer\n' %
87 87 (len(entries), total_bytes))
88 88 fileobj.write('%d %d\n' % (len(entries), total_bytes))
89 89 for name, size in entries:
90 90 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
91 91 fileobj.write('%s\0%d\n' % (name, size))
92 for chunk in util.filechunkiter(repo.opener(name), limit=size):
92 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
93 93 fileobj.write(chunk)
94 94 flush = getattr(fileobj, 'flush', None)
95 95 if flush: flush()