# Enable or disable the available reporting modules [on/off].
# If you add a custom reporting module to your Cuckoo setup, you have to add
# a dedicated entry in this file, or it won't be executed.
# You can also add additional options under the section of your module and
# they will be available in your Python class.
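# For example, a custom module dropped into modules/reporting/ could be wired
# up with a (commented-out, illustrative) section like the one below; the
# section name and the "max_items" option are placeholders, not part of the
# stock configuration. Inside the module's Python class the extra option
# would then be readable via something like self.options.get("max_items").
#
# [mymodule]
# enabled = yes
# max_items = 100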
[cents]
enabled = no
on_demand = no
# starting signature ID for created Suricata rules
start_sid = 1000000

[mitre]
enabled = no

# https://github.com/geekscrapy/binGraph
# requires -> apt-get install python-tk
[bingraph]
enabled = yes
on_demand = yes
binary = yes
# generate bingraphs for cape/procdumps
cape = yes
procdump = yes

[pcap2cert]
# jk edit: took 200 seconds during processing...
enabled = no

[litereport]
enabled = no
keys_to_copy = CAPE procdump info signatures dropped static target network shot malscore ttps
behavior_keys_to_copy = processtree summary

[jsondump]
enabled = yes
# use the C-optimized JSON encoder; requires the entire JSON result to fit in memory
ram_boost = no
indent = 4
encoding = latin-1

[reporthtml]
# required for the WSGI interface
enabled = yes

[reporthtmlsummary]
# much smaller and faster report generation; omits API logs and is non-interactive
enabled = yes

[reportpdf]
# Note that this requires reporthtmlsummary to be enabled above as well
enabled = yes

[maec41]
enabled = no
mode = overview
processtree = true
output_handles = false
static = true
strings = true
virustotal = true
deduplicate = true

[maec5]
enabled = no

[mongodb]
enabled = yes
host = 127.0.0.1
port = 27017
db = cuckoo
# Set these values if you are using MongoDB authentication
# username =
# password =
# authsource = cuckoo
# Automatically delete large dict values that exceed MongoDB's 16MB document limit.
# Note: this only deletes dict keys from the data stored in MongoDB. You would
# still get the full dataset if you parsed the results dict in another
# reporting module or from the jsondump module.
fix_large_docs = yes

# Use Elasticsearch as the "database" which powers Django.
# NOTE: if this is enabled, MongoDB should not be enabled, unless the
# searchonly option is set to yes; in that case Elasticsearch is only used
# for the /search web page.
[elasticsearchdb]
enabled = no
searchonly = no
host = 127.0.0.1
port = 9200
# The report data is indexed in the form {{index-yyyy.mm.dd}},
# so the index configuration option below is actually an index 'prefix'.
index = cuckoo
# username =
# password =
# use_ssl =
# verify_certs =

[retention]
enabled = no
# run at most once every this many hours (unless reporting.conf is modified)
run_every = 6
# Number of days a task must be old before the corresponding data is deleted.
# Set a value to no to never delete it.
memory = 14
procmemory = 62
pcap = 62
sortedpcap = 14
bsonlogs = 62
dropped = 62
screencaps = 62
reports = 62
mongo = 731
elastic = no

[syslog]
enabled = no
# IP of your syslog server/listener
host = x.x.x.x
# Port of your syslog server/listener
port = 514
# Protocol to send data over
protocol = tcp
# Store a logfile? [in reports directory]
logfile = yes
# If yes, what logname? [Default: syslog.txt]
logname = syslog.log

[moloch]
enabled = no
base = https://172.18.100.105:8005/
node = cuckoo3
capture = /data/moloch/bin/moloch-capture
captureconf = /data/moloch/etc/config.ini
user = admin
pass = admin
realm = Moloch

[resubmitexe]
enabled = no
resublimit = 5

[compression]
enabled = no
zipmemdump = yes
zipmemstrings = yes
zipprocdump = yes
zipprocstrings = yes

[misp]
enabled = no
apikey =
url =
# Publish the event after creation?
published = no
# minimum malscore; the default of 0 reports everything
min_malscore = 0
# number of threads; 5 by default
threads =
# retrieve information for IOCs
# and activate MISP report download from the web GUI
extend_context = no
# upload IOCs from Cuckoo to MISP
upload_iocs = no
distribution = 0
threat_level_id = 2
analysis = 2
# Sections to report
# The analysis ID will be appended; change as needed
title = Iocs from cuckoo analysis:
network = no
ids_files = no
dropped = no
registry = no
mutexes = no

[callback]
enabled = no
# will send {"task_id": X} as POST data
# multiple comma-separated URLs are allowed
url = http://IP/callback

[distributed]
enabled = no
# save results on the master; don't analyze binaries on it
master_storage_only = no
remove_task_on_worker = no
failed_clean = no
# distributed Cuckoo database, used to store node and task info
db = sqlite:///dist.db
# number of tries before a node is declared dead and deactivated
dead_count = 5
# number of threads which will retrieve files; see api.py for dist
dist_threads = 4
# Tags break the distributed logic;
# don't activate this unless you really know what you are doing
enable_tags = no
# Fetch data over the REST API or NFS; see the docs for how to set up NFS
nfs = no

# CAPE auto-submission of detected samples
# with a selected CAPE package.
[submitCAPE]
enabled = yes
# check for the root keyword; if found, do not resubmit (allows custom extractions)
keyword = tr_extractor
# distributed CAPE; only enable on clients
distributed = no
# REST API URL of the master node
url = http://IP:8000/api/tasks/create/file/

# Compress results, including CAPE output,
# to help avoid hitting the hard 16MB MongoDB limit.
[compressresults]
enabled = no

[tmpfsclean]
enabled = no
key = tr_extractor

# This calls the specified command, pointing it at the report.json as
# well as setting $ENV{CAPE_TASK_ID} to the task ID of the run in question.
[zexecreport]
enabled = no
command = /foo/bar.pl

# Run statistics; this may take more time.
[runstatistics]
enabled = no
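# A minimal sketch of a consumer for the [zexecreport] hook above, assuming
# the report.json path is handed to the command as its first argument (the
# argument convention and the printed fields are illustrative assumptions,
# not documented CAPE behavior):
#
#   import json, os, sys
#
#   report_path = sys.argv[1]                    # path to report.json
#   task_id = os.environ.get("CAPE_TASK_ID")     # task ID set by CAPE
#   with open(report_path) as fh:
#       report = json.load(fh)
#   print(f"task {task_id}: malscore {report.get('malscore')}")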