import errno
import json
import os
import socket
import select
import logging
import datetime
import SocketServer
import threading

from lib.cuckoo.common.abstracts import ProtocolHandler
from lib.cuckoo.common.config import Config
from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.exceptions import CuckooCriticalError
from lib.cuckoo.common.exceptions import CuckooResultError
from lib.cuckoo.common.netlog import BsonParser
from lib.cuckoo.common.utils import create_folder, Singleton
from lib.cuckoo.core.log import task_log_start, task_log_stop

log = logging.getLogger(__name__)

BUFSIZE = 16 * 1024

class Disconnect(Exception):
    pass

class ResultServer(SocketServer.ThreadingTCPServer, object):
    """Result server. Singleton!

    This class handles results coming back from the analysis machines.
    """

    __metaclass__ = Singleton

    allow_reuse_address = True
    daemon_threads = True

    def __init__(self, *args, **kwargs):
        self.cfg = Config()
        self.analysistasks = {}
        self.analysishandlers = {}

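        # The bind address and port come from the [resultserver] section of
        # cuckoo.conf. An illustrative configuration (the values shown are
        # assumptions, not authoritative defaults) could look like:
        #
        #   [resultserver]
        #   ip = 192.168.56.1
        #   port = 2042
        #   force_port = no
        #   upload_max_size = 134217728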
        ip = self.cfg.resultserver.ip
        self.port = int(self.cfg.resultserver.port)
        while True:
            try:
                server_addr = ip, self.port
                SocketServer.ThreadingTCPServer.__init__(
                    self, server_addr, ResultHandler, *args, **kwargs
                )
            except Exception as e:
                if e.errno == errno.EADDRINUSE:
                    if self.cfg.resultserver.get("force_port"):
                        raise CuckooCriticalError(
                            "Cannot bind ResultServer on port %d, "
                            "bailing." % self.port
                        )
                    else:
                        log.warning("Cannot bind ResultServer on port %s, "
                                    "trying another port.", self.port)
                        self.port += 1
                elif e.errno == errno.EADDRNOTAVAIL:
                    raise CuckooCriticalError(
                        "Unable to bind ResultServer on %s:%s %s. This "
                        "usually happens when you start Cuckoo without "
                        "bringing up the virtual interface associated with "
                        "the ResultServer IP address. Please refer to "
                        "http://docs.cuckoosandbox.org/en/latest/faq/#troubles-problem"
                        " for more information." % (ip, self.port, e)
                    )
                else:
                    raise CuckooCriticalError(
                        "Unable to bind ResultServer on %s:%s: %s" %
                        (ip, self.port, e)
                    )
            else:
                log.debug("ResultServer running on %s:%s.", ip, self.port)
                self.servethread = threading.Thread(target=self.serve_forever)
                self.servethread.setDaemon(True)
                self.servethread.start()
                break

    def add_task(self, task, machine):
        """Register a task/machine with the ResultServer."""
        self.analysistasks[machine.ip] = task, machine
        self.analysishandlers[task.id] = []

    def del_task(self, task, machine):
        """Delete ResultServer state and wait for pending RequestHandlers."""
        x = self.analysistasks.pop(machine.ip, None)
        if not x:
            log.warning("ResultServer did not have %s in its task info.",
                        machine.ip)
        handlers = self.analysishandlers.pop(task.id, None)
        for h in handlers or []:
            h.end_request.set()
            h.done_event.wait()

    def register_handler(self, handler):
        """Register a RequestHandler so that we can later wait for it."""
        task, machine = self.get_ctx_for_ip(handler.client_address[0])
        if not task or not machine:
            return False

        self.analysishandlers[task.id].append(handler)

    def get_ctx_for_ip(self, ip):
        """Return state for this IP's task."""
        x = self.analysistasks.get(ip)
        if not x:
            log.critical("ResultServer unable to map ip to context: %s.", ip)
            return None, None

        return x

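    # A sketch of the helper that maps a client IP to its analysis storage
    # folder; the method name and the storage/analyses/<task id> layout are
    # assumptions based on the CUCKOO_ROOT import and on how self.storagepath
    # is used by the protocol handlers below.
    def build_storage_path(self, ip):
        """Initialize analysis storage folder."""
        task, machine = self.get_ctx_for_ip(ip)
        if not task or not machine:
            return

        return os.path.join(CUCKOO_ROOT, "storage", "analyses", "%s" % task.id)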

class ResultHandler(SocketServer.BaseRequestHandler):
    """Result handler.

    This handler speaks our analysis log network protocol.
    """

    def setup(self):
        self.rawlogfd = None
        self.protocol = None
        self.startbuf = ""
        self.end_request = threading.Event()
        self.done_event = threading.Event()
        self.server.register_handler(self)

        if hasattr(select, "poll"):
            self.poll = select.poll()
            self.poll.register(self.request, select.POLLIN)
        else:
            self.poll = None

    def finish(self):
        self.done_event.set()

        if self.protocol:
            self.protocol.close()
        if self.rawlogfd:
            self.rawlogfd.close()

    def wait_sock_or_end(self):
        # Block until either data arrives on the socket or end_request is
        # set by ResultServer.del_task(); returns False in the latter case.
        while True:
            if self.end_request.isSet():
                return False

            if self.poll:
                if self.poll.poll(1000):
                    return True
            else:
                rs, _, _ = select.select([self.request], [], [], 1)
                if rs:
                    return True

    def seek(self, pos):
        # The BSON parser treats this handler as a file-like object; seeking
        # is meaningless on a socket, so this is a no-op.
        pass

    def read(self, length):
        buf = ""
        while len(buf) < length:
            if not self.wait_sock_or_end():
                raise Disconnect()
            tmp = self.request.recv(length-len(buf))
            if not tmp:
                raise Disconnect()
            buf += tmp

        if isinstance(self.protocol, BsonParser):
            if self.rawlogfd:
                self.rawlogfd.write(buf)
            else:
                self.startbuf += buf

                if len(self.startbuf) > 0x10000:
                    raise CuckooResultError(
                        "Somebody is knowingly overflowing the startbuf "
                        "buffer, possibly to use excessive amounts of memory."
                    )

        return buf

    def read_any(self):
        # Read whatever is currently available on the socket, up to BUFSIZE
        # bytes, or raise Disconnect when the connection is being torn down.
        if not self.wait_sock_or_end():
            raise Disconnect()
        tmp = self.request.recv(BUFSIZE)
        if not tmp:
            raise Disconnect()
        return tmp

    def read_newline(self, strip=False):
        buf = ""
        while "\n" not in buf:
            buf += self.read(1)

        if strip:
            buf = buf.strip()

        return buf

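    # Protocol negotiation: the first line sent by the analyzer selects the
    # handler used for the rest of the connection, optionally followed by a
    # protocol version. For example (version numbers illustrative):
    #   "BSON 2\n" -> behavioral log stream handled by BsonParser
    #   "FILE 2\n" -> file upload (screenshots, dropped files, buffers)
    #   "LOG\n"    -> live analysis.log lines handled by LogHandler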
    def negotiate_protocol(self):
        protocol = self.read_newline(strip=True)

        # Command with optional version number.
        if " " in protocol:
            command, version = protocol.split()
            version = int(version)
        else:
            command, version = protocol, None

        if command == "BSON":
            self.protocol = BsonParser(self, version)
        elif command == "FILE":
            self.protocol = FileUpload(self, version)
        elif command == "LOG":
            self.protocol = LogHandler(self, version)
        else:
            raise CuckooOperationalError(
                "Netlog failure, unknown protocol requested."
            )

        self.protocol.init()

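    # A sketch of the per-connection entry point (BaseRequestHandler.handle),
    # reconstructed from how the surrounding methods and imports are used;
    # the exact control flow, log messages and the "process" event layout
    # (matching what open_process_log() expects below) are assumptions.
    def handle(self):
        ip, port = self.client_address

        self.storagepath = self.server.build_storage_path(ip)
        if not self.storagepath:
            return

        task, _ = self.server.get_ctx_for_ip(ip)
        task_log_start(task.id)

        # Make sure the analysis storage subfolders exist before any handler
        # tries to write into them.
        self.create_folders()

        try:
            self.negotiate_protocol()

            for event in self.protocol:
                if isinstance(self.protocol, BsonParser) and \
                        event["type"] == "process":
                    self.open_process_log(event)
        except CuckooResultError as e:
            log.warning(
                "ResultServer connection stopping because of "
                "CuckooResultError: %s.", e
            )
        except (Disconnect, socket.error):
            pass
        except:
            log.exception("Exception in ResultServer connection %s.",
                          self.client_address)

        task_log_stop(task.id)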

    def open_process_log(self, event):
        pid = event["pid"]
        ppid = event["ppid"]
        procname = event["process_name"]

        if self.rawlogfd:
            log.debug(
                "ResultServer got a new process message but already "
                "has pid %d ppid %s procname %s.", pid, ppid, procname
            )
            raise CuckooResultError(
                "ResultServer connection state inconsistent."
            )

        if not isinstance(pid, (int, long)):
            raise CuckooResultError(
                "An invalid process identifier has been provided, this "
                "could be a potential security hazard."
            )

        # Only announce the process when the analyzer is tracking it.
        if event["track"]:
            log.debug(
                "New process (pid=%s, ppid=%s, name=%s)",
                pid, ppid, procname
            )

        filepath = os.path.join(self.storagepath, "logs", "%s.bson" % pid)
        self.rawlogfd = open(filepath, "wb")
        self.rawlogfd.write(self.startbuf)

    def create_folders(self):
        folders = "shots", "files", "logs", "buffer"

        for folder in folders:
            try:
                create_folder(self.storagepath, folder=folder)
            except CuckooOperationalError:
                log.error("Unable to create folder %s" % folder)
                return False

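# Layout of an analysis storage folder, as used by the handlers in this
# module (an illustrative summary, not an exhaustive list):
#   logs/<pid>.bson   behavioral log per monitored process (BsonParser)
#   files/, buffer/   artifacts uploaded through the FILE protocol
#   shots/            screenshots uploaded through the FILE protocol
#   files.json        one JSON record per uploaded file
#   analysis.log      live analyzer log written by LogHandler
#   reports/          restricted; uploads into it are rejected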
class FileUpload(ProtocolHandler):
    # Destinations inside the analysis folder that uploads may never touch.
    RESTRICTED_DIRECTORIES = "reports/",
    lock = threading.Lock()

    def init(self):
        self.upload_max_size = \
            self.handler.server.cfg.resultserver.upload_max_size
        self.storagepath = self.handler.storagepath
        self.fd = None

        self.filelog = os.path.join(self.handler.storagepath, "files.json")

    def __iter__(self):
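        # Header layout, as implemented below: first a line with the
        # destination path, relative to the analysis storage folder (e.g.
        # under the shots/, files/ or buffer/ subfolders). With protocol
        # version >= 2, two more lines follow: the original filepath on the
        # guest and a space-separated list of PIDs associated with the file.
        # Everything after the header is raw file content.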
        dump_path = self.handler.read_newline(strip=True).replace("\\", "/")

        if self.version >= 2:
            filepath = self.handler.read_newline(strip=True)
            pids = map(int, self.handler.read_newline(strip=True).split())
        else:
            filepath, pids = None, []

        log.debug("File upload request for %s", dump_path)

        dir_part, filename = os.path.split(dump_path)

        if "./" in dump_path or not dir_part or dump_path.startswith("/"):
            raise CuckooOperationalError(
                "FileUpload failure, banned path: %s" % dump_path
            )

        for restricted in self.RESTRICTED_DIRECTORIES:
            if restricted in dir_part:
                raise CuckooOperationalError(
                    "FileUpload failure, banned path."
                )

        try:
            create_folder(self.storagepath, dir_part)
        except CuckooOperationalError:
            log.error("Unable to create folder %s", dir_part)
            return

        file_path = os.path.join(self.storagepath, dump_path)

        if not file_path.startswith(self.storagepath):
            raise CuckooOperationalError(
                "FileUpload failure, path sanitization failed."
            )

        if os.path.exists(file_path):
            log.warning(
                "Analyzer tried to overwrite an existing file, "
                "closing connection."
            )
            return

        self.fd = open(file_path, "wb")
        chunk = self.handler.read_any()
        while chunk:
            self.fd.write(chunk)

            if self.fd.tell() >= self.upload_max_size:
                log.warning(
                    "Uploaded file length larger than upload_max_size, "
                    "stopping upload."
                )
                self.fd.write("... (truncated)")
                break

            try:
                chunk = self.handler.read_any()
            except:
                break

        self.lock.acquire()

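        # Each completed upload is recorded as one JSON object per line in
        # files.json. An illustrative record (values are made up) would be:
        #   {"path": "files/dropped.exe", "filepath": "C:\\dropped.exe",
        #    "pids": [1234]}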
        with open(self.filelog, "a+b") as f:
            f.write("%s\n" % json.dumps({
                "path": dump_path,
                "filepath": filepath,
                "pids": pids,
            }))

        self.lock.release()

        log.debug("Uploaded file length: %s", self.fd.tell())

        # The unreachable yield turns this method into a generator, as
        # expected by the caller that iterates over the protocol handler.
        return
        yield

    def close(self):
        if self.fd:
            self.fd.close()

class LogHandler(ProtocolHandler):
    def init(self):
        self.logpath = os.path.join(self.handler.storagepath, "analysis.log")
        self.fd = self._open()
        log.debug("LogHandler for live analysis.log initialized.")

    def __iter__(self):
        if not self.fd:
            return

        while True:
            try:
                buf = self.handler.read_newline(strip=False)
            except Disconnect:
                break

            if not buf:
                break

            self.fd.write(buf)
            self.fd.flush()

        return
        yield

    def close(self):
        if self.fd:
            self.fd.close()

    def _open(self):
        if not os.path.exists(self.logpath):
            return open(self.logpath, "wb")

        log.debug("Log analysis.log already existing, appending data.")
        fd = open(self.logpath, "ab")

        # Add a fake log entry noting that the log file has been re-opened,
        # using the same format as the default logger so that it can still
        # be parsed.
        now = datetime.datetime.now()
        print >>fd, "\n%s,%03.0f [lib.core.resultserver] WARNING: This log file was re-opened, log entries will be appended." % (
            now.strftime("%Y-%m-%d %H:%M:%S"), now.microsecond / 1000.0
        )

        return fd